author:    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer: Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit:    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree:      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/net/cxgb4vf
parent:    ea53c912f8a86a8567697115b6a0d8152beee5c8
parent:    6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/net/cxgb4vf')
-rw-r--r--  drivers/net/cxgb4vf/adapter.h      |   8
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 322
-rw-r--r--  drivers/net/cxgb4vf/sge.c          | 147
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h  |  27
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c      | 133
5 files changed, 389 insertions, 248 deletions
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4fd821aadc8a 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
	 * MSI-X interrupt index usage.
	 */
	MSIX_FW = 0,			/* MSI-X index for firmware Q */
-	MSIX_NIQFLINT = 1,		/* MSI-X index base for Ingress Qs */
+	MSIX_IQFLINT = 1,		/* MSI-X index base for Ingress Qs */
	MSIX_EXTRAS = 1,
	MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,

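The rename is mechanical (every MSIX_NIQFLINT user below changes the same way), but the vector layout it encodes is worth keeping in mind when reading the later hunks. A hedged sketch of the mapping, using only the constants and fields this patch shows (qset_to_msix_vec() itself is illustrative, not part of the driver):

	/*
	 * Sketch: vector 0 (MSIX_FW) serves the firmware event queue;
	 * Ethernet Queue Sets take vectors MSIX_IQFLINT, MSIX_IQFLINT+1, ...
	 * in adapter-wide Queue Set order.
	 */
	static inline unsigned int qset_to_msix_vec(const struct port_info *pi,
						    unsigned int qs)
	{
		return MSIX_IQFLINT + pi->first_qset + qs;	/* qs < pi->nqsets */
	}
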
@@ -97,17 +97,11 @@ struct port_info {
	u16 rss_size;			/* size of VI's RSS table slice */
	u8 pidx;			/* index into adapter port[] */
	u8 port_id;			/* physical port ID */
-	u8 rx_offload;			/* CSO, etc. */
	u8 nqsets;			/* # of "Queue Sets" */
	u8 first_qset;			/* index of first "Queue Set" */
	struct link_config link_cfg;	/* physical port configuration */
};

-/* port_info.rx_offload flags */
-enum {
-	RX_CSO = 1 << 0,
-};
-
/*
 * Scatter Gather Engine resources for the "adapter".  Our ingress and egress
 * queues are organized into "Queue Sets" with one ingress and one egress
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 7b6d07f50c71..e71c08e547e4 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

-		for (qs = 0, msi = MSIX_NIQFLINT;
-		     qs < pi->nqsets;
-		     qs++, msi++) {
+		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
	/*
	 * Ethernet queues.
	 */
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
-	msix = MSIX_NIQFLINT;
+	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
@@ -748,11 +746,22 @@ static int cxgb4vf_open(struct net_device *dev)
	/*
	 * Note that this interface is up and start everything up ...
	 */
-	dev->real_num_tx_queues = pi->nqsets;
-	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	netif_set_real_num_tx_queues(dev, pi->nqsets);
+	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
+	if (err)
+		goto err_unwind;
+	err = link_start(dev);
+	if (err)
+		goto err_unwind;
+
	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
}

/*
@@ -761,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
 */
static int cxgb4vf_stop(struct net_device *dev)
{
-	int ret;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
@@ -811,40 +819,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
}

/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
 */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
{
+	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

-	for_each_dev_addr(dev, ha) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
	return naddr;
}

/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
 */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
{
+	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

-	netdev_for_each_mc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
	return naddr;
}

@@ -857,16 +873,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
-	u16 filt_idx[7];
+	unsigned int offset, naddr;
	const u8 *addr[7];
-	int ret, naddr = 0;
+	int ret;
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
-	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &uhash, sleep);
+					  naddr, addr, NULL, &uhash, sleep);
		if (ret < 0)
			return ret;

@@ -874,12 +894,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
	}

	/* next set up the multicast addresses */
-	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &mhash, sleep);
+					  naddr, addr, NULL, &mhash, sleep);
		if (ret < 0)
			return ret;
+		free = false;
	}

	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
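Both loops above follow the same restartable-batch idiom: the collect helpers take an offset so the fixed 7-entry addr[] array can be refilled until the netdev address list is exhausted, and free is cleared after the first pass so only the first mailbox command releases the previously programmed filters. Condensed to its skeleton (collect() and program_batch() are hypothetical stand-ins for the helpers above):

	/* Sketch: drain an arbitrarily long list through a fixed-size batch. */
	for (offset = 0; ; offset += naddr) {
		naddr = collect(dev, addr, offset, ARRAY_SIZE(addr));
		if (naddr == 0)
			break;		/* list exhausted */
		ret = program_batch(adapter, free, naddr, addr);
		if (ret < 0)
			return ret;
		free = false;		/* only the first batch frees old filters */
	}
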
@@ -1100,18 +1125,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
	return 0;
}

-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
@@ -1154,7 +1167,8 @@ static int cxgb4vf_get_settings(struct net_device *dev,

	cmd->supported = pi->link_cfg.supported;
	cmd->advertising = pi->link_cfg.advertising;
-	cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+	ethtool_cmd_speed_set(cmd,
+			      netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
	cmd->duplex = DUPLEX_FULL;

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1313,37 +1327,22 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
}

/*
- * Return whether RX Checksum Offloading is currently enabled for the device.
- */
-static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
-{
-	struct port_info *pi = netdev_priv(dev);
-
-	return (pi->rx_offload & RX_CSO) != 0;
-}
-
-/*
- * Turn RX Checksum Offloading on or off for the device.
+ * Identify the port by blinking the port's LED.
 */
-static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
+static int cxgb4vf_phys_id(struct net_device *dev,
+			   enum ethtool_phys_id_state state)
{
+	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

-	if (csum)
-		pi->rx_offload |= RX_CSO;
+	if (state == ETHTOOL_ID_ACTIVE)
+		val = 0xffff;
+	else if (state == ETHTOOL_ID_INACTIVE)
+		val = 0;
	else
-		pi->rx_offload &= ~RX_CSO;
-	return 0;
-}
-
-/*
- * Identify the port by blinking the port's LED.
- */
-static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
-{
-	struct port_info *pi = netdev_priv(dev);
+		return -EINVAL;

-	return t4vf_identify_port(pi->adapter, pi->viid, 5);
+	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

1349/* 1348/*
@@ -1355,6 +1354,8 @@ struct queue_port_stats {
1355 u64 rx_csum; 1354 u64 rx_csum;
1356 u64 vlan_ex; 1355 u64 vlan_ex;
1357 u64 vlan_ins; 1356 u64 vlan_ins;
1357 u64 lro_pkts;
1358 u64 lro_merged;
1358}; 1359};
1359 1360
1360/* 1361/*
@@ -1392,6 +1393,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
+	"GROPackets         ",
+	"GROMerged          ",
};

/*
@@ -1441,6 +1444,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
+		stats->lro_pkts += rxq->stats.lro_pkts;
+		stats->lro_merged += rxq->stats.lro_merged;
	}
}

@@ -1537,16 +1542,9 @@ static void cxgb4vf_get_wol(struct net_device *dev,
}

/*
- * Set TCP Segmentation Offloading feature capabilities.
+ * TCP Segmentation Offload flags which we support.
 */
-static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
-{
-	if (tso)
-		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-	else
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-	return 0;
-}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings		= cxgb4vf_get_settings,
@@ -1558,19 +1556,14 @@ static struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
-	.get_rx_csum		= cxgb4vf_get_rx_csum,
-	.set_rx_csum		= cxgb4vf_set_rx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
-	.set_sg			= ethtool_op_set_sg,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
-	.phys_id		= cxgb4vf_phys_id,
+	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
-	.set_tso		= cxgb4vf_set_tso,
};

/*
@@ -2016,7 +2009,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
{
	int i;

-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Debugfs support is best effort.
@@ -2035,9 +2028,9 @@ static int __devinit setup_debugfs(struct adapter *adapter)
 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
 * it to our caller to tear down the directory (debugfs_root).
 */
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
{
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2053,7 +2046,7 @@ static void cleanup_debugfs(struct adapter *adapter)
 * adapter parameters we're going to be using and initialize basic adapter
 * hardware support.
 */
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct sge_params *sge_params = &adapter->params.sge;
@@ -2072,6 +2065,22 @@ static int __devinit adap_init0(struct adapter *adapter)
	}

	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
	 * Grab basic operational parameters.  These will predominantly have
	 * been set up by the Physical Function Driver or will be hard coded
	 * into the adapter.  We just have to live with them ...  Note that
@@ -2243,6 +2252,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
+	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
@@ -2287,6 +2297,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
	s->ethqsets = qidx;

	/*
+	 * The Ingress Queue Entry Size for our various Response Queues needs
+	 * to be big enough to accommodate the largest message we can receive
+	 * from the chip/firmware; which is 64 bytes ...
+	 */
+	iqe_size = 64;
+
+	/*
	 * Set up default Queue Set parameters ... Start off with the
	 * shortest interrupt holdoff timer.
	 */
@@ -2294,7 +2311,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

-		init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}
@@ -2303,8 +2320,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
-	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
-		  L1_CACHE_BYTES);
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);

	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2320,7 +2336,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
-		  iqe_size);
}

/*
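Since every response queue now uses the same fixed 64-byte entry, the ring footprints fall straight out of the entry counts chosen above (arithmetic only; the SGE status page, noted in the sge.c hunks below, adds a platform-dependent tail):

	/* Sketch: ring sizes implied by cfg_queues() with iqe_size = 64. */
	size_t ethrxq_bytes = 1024 * 64;		/* 64 KiB per Ethernet response queue */
	size_t fw_evtq_bytes = 512 * 64;		/* 32 KiB firmware event queue */
	size_t intrq_bytes = (MSIX_ENTRIES + 1) * 64;	/* forwarded-interrupt queue */
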
@@ -2414,7 +2430,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
-	.ndo_select_queue	= cxgb4vf_select_queue,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
@@ -2443,17 +2458,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	struct net_device *netdev;

	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
	 * Print our driver banner the first time we're called to initialize a
	 * device.
	 */
@@ -2462,7 +2466,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
		version_printed = 1;
	}

-
	/*
	 * Initialize generic PCI device state.
	 */
@@ -2595,20 +2598,19 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	 * it.
	 */
	pi->xact_addr_filt = -1;
-	pi->rx_offload = RX_CSO;
	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
	netdev->irq = pdev->irq;

-	netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
-			    NETIF_F_GRO);
+	netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+	netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_HIGHDMA;
+	netdev->features = netdev->hw_features |
+		NETIF_F_HW_VLAN_RX;
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
-	netdev->vlan_features =
-		(netdev->features &
-		 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));

#ifdef HAVE_NET_DEVICE_OPS
	netdev->netdev_ops = &cxgb4vf_netdev_ops;
@@ -2622,7 +2624,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	netdev->do_ioctl = cxgb4vf_do_ioctl;
	netdev->change_mtu = cxgb4vf_change_mtu;
	netdev->set_mac_address = cxgb4vf_set_mac_addr;
-	netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2668,11 +2669,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	/*
	 * Set up our debugfs entries.
	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
@@ -2706,7 +2707,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	cfg_queues(adapter);

	/*
-	 * Print a short notice on the existance and configuration of the new
+	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
@@ -2727,7 +2728,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
	 */

err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}
@@ -2759,7 +2760,6 @@ err_release_regions:
err_disable_device:
	pci_disable_device(pdev);

-err_out:
	return err;
}

@@ -2797,7 +2797,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
	/*
	 * Tear down our debugfs entries.
	 */
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}
@@ -2831,6 +2831,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
}

/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues which will prevent further DMA and
+	 * Interrupts allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
 * PCI Device registration data structures.
 */
#define CH_DEVICE(devid, idx) \
@@ -2841,6 +2881,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
	CH_DEVICE(0x4800, 0),	/* T440-dbg */
	CH_DEVICE(0x4801, 0),	/* T420-cr */
	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),	/* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),	/* T404-bt */
	{ 0, }
};

@@ -2855,6 +2903,7 @@ static struct pci_driver cxgb4vf_driver = {
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= __devexit_p(cxgb4vf_pci_remove),
+	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
};

/*
@@ -2864,14 +2913,25 @@ static int __init cxgb4vf_module_init(void)
{
	int ret;

+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
	/* Debugfs support is optional, just warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
		       " debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d3..5fd75fdaa631 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -41,6 +41,7 @@
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"
@@ -154,13 +155,14 @@ enum {
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,
-};

-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};

/*
 * Software state per TX descriptor.
@@ -223,8 +225,8 @@ static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
/**
 * need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
- * Returns true if the platfrom needs sk_buff unmapping.  The compiler
- * optimizes away unecessary code if this returns true.
+ * Returns true if the platform needs sk_buff unmapping.  The compiler
+ * optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
@@ -266,7 +268,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has falled below our "starvation"
- *	threshhold.
+ *	threshold.
 */
static inline bool fl_starving(const struct sge_fl *fl)
{
@@ -1148,7 +1150,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/*
		 * After we're done injecting the Work Request for this
-		 * packet, we'll be below our "stop threshhold" so stop the TX
+		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
@@ -1355,6 +1357,67 @@ out_free:
}

/**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
+/**
 *	t4vf_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
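For callers, the two regimes are visible only through the parameters: packets up to RX_COPY_THRES are copied whole into the linear area, while larger ones get pull_len bytes of headers inline (so header parsing never touches page fragments) with skb_len bounding the linear allocation. The Ethernet RX handler later in this patch uses it exactly like this:

	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);	/* allocation failed: return the pages */
		rxq->stats.rx_drops++;
		return 0;
	}
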
@@ -1463,10 +1526,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
	struct sk_buff *skb;
	struct port_info *pi;
-	struct skb_shared_info *ssi;
	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);

	/*
@@ -1481,51 +1542,22 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
	}

	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
+	 * Convert the Packet Gather List into an skb.
	 */
-	if (len <= RX_COPY_THRES) {
-		/* small packets have only one fragment */
-		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, gl->frags[0].size);
-		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-	} else {
-		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, RX_PKT_PULL_LEN);
-		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-					     RX_PKT_PULL_LEN);
-		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-		skb->len = len + PKTSHIFT;
-		skb->data_len = skb->len - RX_PKT_PULL_LEN;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
	}

	__skb_pull(skb, PKTSHIFT);
	skb->protocol = eth_type_trans(skb, rspq->netdev);
	skb_record_rx_queue(skb, rspq->idx);
-	skb->dev->last_rx = jiffies;  /* XXX removed 2.6.29 */
	pi = netdev_priv(skb->dev);
	rxq->stats.pkts++;

-	if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
-	    (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+	if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+	    !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
		if (!pkt->ip_frag)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else {
@@ -1535,8 +1567,11 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
		}
		rxq->stats.rx_cso++;
	} else
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);

+	/*
+	 * Deliver the packet to the stack.
+	 */
	if (unlikely(pkt->vlan_ex)) {
		struct vlan_group *grp = pi->vlan_grp;

@@ -1550,11 +1585,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
	netif_receive_skb(skb);

	return 0;
-
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
}

/**
@@ -1680,6 +1710,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
			}
			len = RSPD_LEN(len);
		}
+		gl.tot_len = len;

		/*
		 * Gather packet fragments.
@@ -2116,7 +2147,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,

		/*
		 * Calculate the size of the hardware free list ring plus
-		 * status page (which the SGE will place at the end of the
+		 * Status Page (which the SGE will place after the end of the
		 * free list ring) in Egress Queue Units.
		 */
		flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2213,8 +2244,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
	struct port_info *pi = netdev_priv(dev);

	/*
-	 * Calculate the size of the hardware TX Queue (including the
-	 * status age on the end) in units of TX Descriptors.
+	 * Calculate the size of the hardware TX Queue (including the Status
+	 * Page on the end of the TX Queue) in units of TX Descriptors.
	 */
	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 5c7bde7f9bae..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -132,15 +132,15 @@ struct rss_params {
	unsigned int mode;			/* RSS mode */
	union {
		struct {
-			int synmapen:1;		/* SYN Map Enable */
-			int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
-			int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
-			int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
-			int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
-			int ofdmapen:1;		/* Offload Map Enable */
-			int tnlmapen:1;		/* Tunnel Map Enable */
-			int tnlalllookup:1;	/* Tunnel All Lookup */
-			int hashtoeplitz:1;	/* use Toeplitz hash */
+			unsigned int synmapen:1;	/* SYN Map Enable */
+			unsigned int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
+			unsigned int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
+			unsigned int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
+			unsigned int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
+			unsigned int ofdmapen:1;	/* Offload Map Enable */
+			unsigned int tnlmapen:1;	/* Tunnel Map Enable */
+			unsigned int tnlalllookup:1;	/* Tunnel All Lookup */
+			unsigned int hashtoeplitz:1;	/* use Toeplitz hash */
		} basicvirtual;
	} u;
};
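The int-to-unsigned switch here is a correctness fix, not a style one: a signed 1-bit bitfield can represent only 0 and -1, so storing 1 in it is implementation-defined and typically reads back as -1, breaking `== 1` comparisons. A standalone two-liner illustrating the difference (not driver code):

	struct bits { int s:1; unsigned int u:1; };
	struct bits b = { .s = 1, .u = 1 };
	/* On common ABIs b.s reads back as -1 (its single bit is the sign bit);
	 * b.u reads back as 1 -- hence the unsigned bitfields above. */
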
@@ -151,10 +151,10 @@ struct rss_params {
union rss_vi_config {
	struct {
		u16 defaultq;			/* Ingress Queue ID for !tnlalllookup */
-		int ip6fourtupen:1;		/* hash 4-tuple IPv6 ingress packets */
-		int ip6twotupen:1;		/* hash 2-tuple IPv6 ingress packets */
-		int ip4fourtupen:1;		/* hash 4-tuple IPv4 ingress packets */
-		int ip4twotupen:1;		/* hash 2-tuple IPv4 ingress packets */
+		unsigned int ip6fourtupen:1;	/* hash 4-tuple IPv6 ingress packets */
+		unsigned int ip6twotupen:1;	/* hash 2-tuple IPv6 ingress packets */
+		unsigned int ip4fourtupen:1;	/* hash 4-tuple IPv4 ingress packets */
+		unsigned int ip4twotupen:1;	/* hash 2-tuple IPv4 ingress packets */
		int udpen;			/* hash 4-tuple UDP ingress packets */
	} basicvirtual;
};
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);

+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);

diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -116,7 +116,7 @@ static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
-	static int delay[] = {
+	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
	 */
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);	/* flush write */
+
	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);		/* flush write */
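The added read-back is the usual fix for posted-write reordering across register domains: a read from the data window cannot complete until the preceding writes to that window have, so it acts as a fence before the control-register "doorbell". In generic kernel MMIO terms (a hedged sketch; the DATA/CTL offsets and doorbell value are made up, and the driver uses its own t4_* accessors rather than these):

	writeq(cmd_word, regs + DATA);		/* payload writes may be posted */
	(void)readl(regs + DATA);		/* read-back forces them to land ... */
	writel(OWNER_FW_DOORBELL, regs + CTL);	/* ... before ownership transfers */
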
@@ -160,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
	delay_idx = 0;
	ms = delay[0];

-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -326,6 +337,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}

/**
+ *	t4vf_fw_reset - issue a reset to FW
+ *	@adapter: the adapter
+ *
+ *	Issues a reset command to FW.  For a Physical Function this would
+ *	result in the Firmware resetting all of its state.  For a Virtual
+ *	Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+				      FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
 *	t4vf_query_params - query FW or device parameters
 *	@adapter: the adapter
 *	@nparams: the number of parameters
@@ -995,48 +1025,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
			unsigned int naddr, const u8 **addr, u16 *idx,
			u64 *hash, bool sleep_ok)
{
-	int i, ret;
+	int offset, ret = 0;
+	unsigned nfilters = 0;
+	unsigned int rem = naddr;
	struct fw_vi_mac_cmd cmd, rpl;
-	struct fw_vi_mac_exact *p;
-	size_t len16;

-	if (naddr > ARRAY_SIZE(cmd.u.exact))
+	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
		return -EINVAL;
-	len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-				      u.exact[naddr]), 16);

-	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-				     FW_CMD_REQUEST |
-				     FW_CMD_WRITE |
-				     (free ? FW_CMD_EXEC : 0) |
-				     FW_VI_MAC_CMD_VIID(viid));
-	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-					    FW_CMD_LEN16(len16));
+	for (offset = 0; offset < naddr; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+					 ? rem
+					 : ARRAY_SIZE(cmd.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;

-	for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-		p->valid_to_idx =
-			cpu_to_be16(FW_VI_MAC_CMD_VALID |
-				    FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-	}
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+					     FW_CMD_REQUEST |
+					     FW_CMD_WRITE |
+					     (free ? FW_CMD_EXEC : 0) |
+					     FW_VI_MAC_CMD_VIID(viid));
+		cmd.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+				    FW_CMD_LEN16(len16));
+
+		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx = cpu_to_be16(
+				FW_VI_MAC_CMD_VALID |
+				FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+		}

-	ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-	if (ret)
-		return ret;
-
-	for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-		u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
-		if (idx)
-			idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-				  ? 0xffff
-				  : index);
-		if (index < FW_CLS_TCAM_NUM_ENTRIES)
-			ret++;
-		else if (hash)
-			*hash |= (1 << hash_mac_addr(addr[i]));
+		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+					sleep_ok);
+		if (ret && ret != -ENOMEM)
+			break;
+
+		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_GET(
+				be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset+i] =
+					(index >= FW_CLS_TCAM_NUM_ENTRIES
+					 ? 0xffff
+					 : index);
+			if (index < FW_CLS_TCAM_NUM_ENTRIES)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
	}
+
+	/*
+	 * If there were no errors or we merely ran out of room in our MAC
+	 * address arena, return the number of filters actually written.
+	 */
+	if (ret == 0 || ret == -ENOMEM)
+		ret = nfilters;
	return ret;
}

@@ -1257,7 +1311,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
-	struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
@@ -1265,7 +1319,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
		/*
		 * Link/module state change message.
		 */
-		const struct fw_port_cmd *port_cmd = (void *)rpl;
+		const struct fw_port_cmd *port_cmd =
+			(const struct fw_port_cmd *)rpl;
		u32 word;
		int action, port_id, link_ok, speed, fc, pidx;
