author	Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2010-12-22 12:57:02 -0500
commit	4b7bd364700d9ac8372eff48832062b936d0793b
tree	0dbf78c95456a0b02d07fcd473281f04a87e266d /drivers/net/cxgb4vf
parent	c0d8768af260e2cbb4bf659ae6094a262c86b085
parent	90a8a73c06cc32b609a880d48449d7083327e11a
Merge branch 'master' into for-next
Conflicts:
	MAINTAINERS
	arch/arm/mach-omap2/pm24xx.c
	drivers/scsi/bfa/bfa_fcpim.c

Needed to update to apply fixes for which the old branch was too outdated.
Diffstat (limited to 'drivers/net/cxgb4vf')
-rw-r--r--	drivers/net/cxgb4vf/cxgb4vf_main.c	131
-rw-r--r--	drivers/net/cxgb4vf/sge.c	122
-rw-r--r--	drivers/net/cxgb4vf/t4vf_common.h	1
-rw-r--r--	drivers/net/cxgb4vf/t4vf_hw.c	113
4 files changed, 237 insertions(+), 130 deletions(-)
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..6bf464afa90e 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
 	if (err)
 		return err;
 	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	err = link_start(dev);
+	if (err)
+		return err;
 	netif_tx_start_all_queues(dev);
 	return 0;
 }
@@ -814,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	for_each_dev_addr(dev, ha) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of addrss pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	netdev_for_each_mc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
@@ -860,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	u64 mhash = 0;
 	u64 uhash = 0;
 	bool free = true;
-	u16 filt_idx[7];
+	unsigned int offset, naddr;
 	const u8 *addr[7];
-	int ret, naddr = 0;
+	int ret;
 	const struct port_info *pi = netdev_priv(dev);
 
 	/* first do the secondary unicast addresses */
-	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &uhash, sleep);
+					  naddr, addr, NULL, &uhash, sleep);
 		if (ret < 0)
 			return ret;
 
@@ -877,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
 	}
 
 	/* next set up the multicast addresses */
-	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &mhash, sleep);
+					  naddr, addr, NULL, &mhash, sleep);
 		if (ret < 0)
 			return ret;
+		free = false;
 	}
 
 	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1122,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
 	return 0;
 }
 
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues.  This is called outside of normal interrupt
@@ -2075,6 +2082,22 @@ static int adap_init0(struct adapter *adapter)
 	}
 
 	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
 	 * Grab basic operational parameters.  These will predominantly have
 	 * been set up by the Physical Function Driver or will be hard coded
 	 * into the adapter.  We just have to live with them ...  Note that
@@ -2246,6 +2269,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 {
 	struct sge *s = &adapter->sge;
 	int q10g, n10g, qidx, pidx, qs;
+	size_t iqe_size;
 
 	/*
 	 * We should not be called till we know how many Queue Sets we can
@@ -2290,6 +2314,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	s->ethqsets = qidx;
 
 	/*
+	 * The Ingress Queue Entry Size for our various Response Queues needs
+	 * to be big enough to accommodate the largest message we can receive
+	 * from the chip/firmware; which is 64 bytes ...
+	 */
+	iqe_size = 64;
+
+	/*
 	 * Set up default Queue Set parameters ... Start off with the
 	 * shortest interrupt holdoff timer.
 	 */
@@ -2297,7 +2328,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
 		struct sge_eth_txq *txq = &s->ethtxq[qs];
 
-		init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
 		rxq->fl.size = 72;
 		txq->q.size = 1024;
 	}
@@ -2306,8 +2337,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	 * The firmware event queue is used for link state changes and
 	 * notifications of TX DMA completions.
 	 */
-	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
-		  L1_CACHE_BYTES);
+	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
 
 	/*
 	 * The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2323,7 +2353,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
 	 * any time ...
 	 */
 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
-		  L1_CACHE_BYTES);
+		  iqe_size);
 }
 
 /*
@@ -2417,7 +2447,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 	.ndo_get_stats		= cxgb4vf_get_stats,
 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
-	.ndo_select_queue	= cxgb4vf_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
 	.ndo_change_mtu		= cxgb4vf_change_mtu,
@@ -2600,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2653,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 		netdev->do_ioctl = cxgb4vf_do_ioctl;
 		netdev->change_mtu = cxgb4vf_change_mtu;
 		netdev->set_mac_address = cxgb4vf_set_mac_addr;
-		netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = cxgb4vf_poll_controller;
 #endif
@@ -2844,6 +2871,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
 	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),	/* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),	/* T404-bt */
 	{ 0, }
 };
 
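The set_addr_filters() rework above is the heart of this file's change: instead of one bounded call that silently ignored any unicast or multicast addresses beyond the seven-entry addr[] array, the driver now walks the netdev lists in chunks, passing a running offset to the collectors and clearing `free` after the first chunk so that only the first firmware command releases the previously programmed filters. Below is a minimal userspace sketch of that chunking pattern; collect_addrs() and write_filters() are hypothetical stand-ins for collect_netdev_uc_list_addrs()/collect_netdev_mc_list_addrs() and t4vf_alloc_mac_filt(), and a list of 17 integers stands in for the address list.

#include <stdio.h>

#define CHUNK 7	/* mirrors ARRAY_SIZE(addr) in set_addr_filters() */

/* Stand-in collector: copy up to max entries starting at offset. */
static unsigned int collect_addrs(const int *list, unsigned int total,
				  int *out, unsigned int offset,
				  unsigned int max)
{
	unsigned int naddr = 0;

	while (offset + naddr < total && naddr < max) {
		out[naddr] = list[offset + naddr];
		naddr++;
	}
	return naddr;
}

/* Stand-in for t4vf_alloc_mac_filt(): just report what would be sent. */
static int write_filters(const int *chunk, unsigned int n, int free_old)
{
	printf("writing %u filters (free=%d), first entry %d\n",
	       n, free_old, chunk[0]);
	return 0;
}

int main(void)
{
	int list[17], chunk[CHUNK];
	unsigned int offset, naddr;
	int free_old = 1;

	for (unsigned int i = 0; i < 17; i++)
		list[i] = (int)i;

	/* Same loop shape as the reworked set_addr_filters(). */
	for (offset = 0; ; offset += naddr) {
		naddr = collect_addrs(list, 17, chunk, offset, CHUNK);
		if (naddr == 0)
			break;
		if (write_filters(chunk, naddr, free_old) < 0)
			return 1;
		free_old = 0;	/* only the first chunk frees old filters */
	}
	return 0;
}

Run against 17 entries this issues chunks of 7, 7 and 3, which is how the driver now programs address lists longer than its on-stack array.
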
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..ecf0770bf0ff 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
 	 */
 	RX_COPY_THRES = 256,
 	RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
 }
 
 /**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
+/**
  *	t4vf_pktgl_free - free a packet gather list
  *	@gl: the gather list
  *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
 	struct sk_buff *skb;
 	struct port_info *pi;
-	struct skb_shared_info *ssi;
 	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*
1472 /* 1532 /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1481 } 1541 }
1482 1542
1483 /* 1543 /*
1484 * If the ingress packet is small enough, allocate an skb large enough 1544 * Convert the Packet Gather List into an skb.
1485 * for all of the data and copy it inline. Otherwise, allocate an skb
1486 * with enough room to pull in the header and reference the rest of
1487 * the data via the skb fragment list.
1488 */ 1545 */
1489 if (len <= RX_COPY_THRES) { 1546 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1490 /* small packets have only one fragment */ 1547 if (unlikely(!skb)) {
1491 skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); 1548 t4vf_pktgl_free(gl);
1492 if (!skb) 1549 rxq->stats.rx_drops++;
1493 goto nomem; 1550 return 0;
1494 __skb_put(skb, gl->frags[0].size);
1495 skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
1496 } else {
1497 skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
1498 if (!skb)
1499 goto nomem;
1500 __skb_put(skb, RX_PKT_PULL_LEN);
1501 skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
1502
1503 ssi = skb_shinfo(skb);
1504 ssi->frags[0].page = gl->frags[0].page;
1505 ssi->frags[0].page_offset = (gl->frags[0].page_offset +
1506 RX_PKT_PULL_LEN);
1507 ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
1508 if (gl->nfrags > 1)
1509 memcpy(&ssi->frags[1], &gl->frags[1],
1510 (gl->nfrags-1) * sizeof(skb_frag_t));
1511 ssi->nr_frags = gl->nfrags;
1512 skb->len = len + PKTSHIFT;
1513 skb->data_len = skb->len - RX_PKT_PULL_LEN;
1514 skb->truesize += skb->data_len;
1515
1516 /* Get a reference for the last page, we don't own it */
1517 get_page(gl->frags[gl->nfrags - 1].page);
1518 } 1551 }
1519
1520 __skb_pull(skb, PKTSHIFT); 1552 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1553 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1554 skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	netif_receive_skb(skb);
 
 	return 0;
-
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
 }
 
 /**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
1679 } 1706 }
1680 len = RSPD_LEN(len); 1707 len = RSPD_LEN(len);
1681 } 1708 }
1709 gl.tot_len = len;
1682 1710
1683 /* 1711 /*
1684 * Gather packet fragments. 1712 * Gather packet fragments.
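Taken together, the sge.c changes route all ingress skb construction through the new t4vf_pktgl_to_skb(), keyed off the gl.tot_len field that process_responses() now fills in: packets of at most RX_COPY_THRES bytes are copied whole into the linear area, while larger packets get an RX_SKB_LEN-byte main body with RX_PULL_LEN header bytes copied in and the rest left in page fragments. Here is a hedged sketch of just that sizing decision, with the sk_buff reduced to a pair of lengths; size_skb() is this sketch's invention, not a driver function.

#include <stdio.h>

enum {
	RX_COPY_THRES = 256,	/* copy the whole packet at or below this */
	RX_PULL_LEN   = 128,	/* header bytes pulled into the main body */
	RX_SKB_LEN    = 512,	/* main body allocation when fragments remain */
};

/*
 * Hypothetical mirror of the sizing decision in t4vf_pktgl_to_skb():
 * report how many bytes land in the skb's linear area and how many
 * stay out in page fragments (skb->data_len in the real function).
 */
static void size_skb(unsigned int tot_len,
		     unsigned int *linear, unsigned int *frag_bytes)
{
	if (tot_len <= RX_COPY_THRES) {
		*linear = tot_len;	/* everything copied inline */
		*frag_bytes = 0;
	} else {
		*linear = RX_PULL_LEN;	/* body allocation is RX_SKB_LEN */
		*frag_bytes = tot_len - RX_PULL_LEN;
	}
}

int main(void)
{
	static const unsigned int lens[] = { 60, 256, 257, 1514, 9000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int linear, frag;

		size_skb(lens[i], &linear, &frag);
		printf("tot_len=%4u -> linear=%3u, fragment bytes=%u\n",
		       lens[i], linear, frag);
	}
	return 0;
}

For a 1514-byte frame that is 128 linear bytes plus 1386 fragment bytes; allocating RX_SKB_LEN rather than RX_PULL_LEN leaves pskb_may_pull() headroom beyond the bytes actually copied, as the new enum comment notes.
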
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 int __devinit t4vf_wait_dev_ready(struct adapter *);
 int __devinit t4vf_port_init(struct adapter *, int);
 
+int t4vf_fw_reset(struct adapter *);
 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..19520afe1a12 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
 }
 
 /**
+ *	t4vf_fw_reset - issue a reset to FW
+ *	@adapter: the adapter
+ *
+ *	Issues a reset command to FW.  For a Physical Function this would
+ *	result in the Firmware reseting all of its state.  For a Virtual
+ *	Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+				      FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
  * t4vf_query_params - query FW or device parameters
  * @adapter: the adapter
  * @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
 			unsigned int naddr, const u8 **addr, u16 *idx,
 			u64 *hash, bool sleep_ok)
 {
-	int i, ret;
+	int offset, ret = 0;
+	unsigned nfilters = 0;
+	unsigned int rem = naddr;
 	struct fw_vi_mac_cmd cmd, rpl;
-	struct fw_vi_mac_exact *p;
-	size_t len16;
 
-	if (naddr > ARRAY_SIZE(cmd.u.exact))
+	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
 		return -EINVAL;
-	len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-			      u.exact[naddr]), 16);
 
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-				     FW_CMD_REQUEST |
-				     FW_CMD_WRITE |
-				     (free ? FW_CMD_EXEC : 0) |
-				     FW_VI_MAC_CMD_VIID(viid));
-	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-				    FW_CMD_LEN16(len16));
+	for (offset = 0; offset < naddr; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+					 ? rem
+					 : ARRAY_SIZE(cmd.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+					     FW_CMD_REQUEST |
+					     FW_CMD_WRITE |
+					     (free ? FW_CMD_EXEC : 0) |
+					     FW_VI_MAC_CMD_VIID(viid));
+		cmd.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+				    FW_CMD_LEN16(len16));
+
+		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx = cpu_to_be16(
+				FW_VI_MAC_CMD_VALID |
+				FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+		}
 
-	for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-		p->valid_to_idx =
-			cpu_to_be16(FW_VI_MAC_CMD_VALID |
-				    FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-	}
 
-	ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-	if (ret)
-		return ret;
-
-	for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-		u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
-		if (idx)
-			idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-				  ? 0xffff
-				  : index);
-		if (index < FW_CLS_TCAM_NUM_ENTRIES)
-			ret++;
-		else if (hash)
-			*hash |= (1 << hash_mac_addr(addr[i]));
+		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+					sleep_ok);
+		if (ret && ret != -ENOMEM)
+			break;
+
+		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_GET(
+				be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset+i] =
+					(index >= FW_CLS_TCAM_NUM_ENTRIES
+					 ? 0xffff
+					 : index);
+			if (index < FW_CLS_TCAM_NUM_ENTRIES)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
 	}
+
+	/*
+	 * If there were no errors or we merely ran out of room in our MAC
+	 * address arena, return the number of filters actually written.
+	 */
+	if (ret == 0 || ret == -ENOMEM)
+		ret = nfilters;
 	return ret;
 }
 
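The rewritten t4vf_alloc_mac_filt() above lifts the old ARRAY_SIZE(cmd.u.exact) limit: it now accepts up to FW_CLS_TCAM_NUM_ENTRIES addresses, issues one FW_VI_MAC_CMD mailbox command per chunk of exact-match slots, and returns the number of filters actually written, treating -ENOMEM (a full MAC address arena) as partial success. A small sketch of the per-iteration arithmetic follows; the chunk size of 7 is this sketch's assumption, standing in for ARRAY_SIZE(cmd.u.exact).

#include <stdio.h>

#define EXACT_SLOTS 7	/* assumed stand-in for ARRAY_SIZE(cmd.u.exact) */

int main(void)
{
	unsigned int naddr = 23;	/* addresses requested */
	unsigned int rem = naddr;
	unsigned int offset, nfilters = 0;

	/* Same loop shape as the new t4vf_alloc_mac_filt(). */
	for (offset = 0; offset < naddr; ) {
		unsigned int fw_naddr = rem < EXACT_SLOTS ? rem : EXACT_SLOTS;

		/* one mailbox command covers addr[offset..offset+fw_naddr) */
		printf("cmd: addr[%2u..%2u), %u exact-match entries\n",
		       offset, offset + fw_naddr, fw_naddr);
		nfilters += fw_naddr;	/* pretend they all fit in the TCAM */

		offset += fw_naddr;
		rem -= fw_naddr;
	}
	printf("filters written: %u of %u\n", nfilters, naddr);
	return 0;
}

For 23 addresses this issues commands of 7, 7, 7 and 2 entries; in the real function any entries that do not fit in the TCAM fall back to the hash filter via *hash, and set_addr_filters() passes NULL for idx since it no longer tracks per-filter indices.
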