author     Ajit Khaparde <ajitk@serverengines.com>    2009-07-21 15:36:19 -0400
committer  David S. Miller <davem@davemloft.net>      2009-07-23 21:00:59 -0400
commit     5be93b9a865344cf69958777c8d7c6f758cba416
tree       77c85b7b47d5c9e136666927cef2f6d21280ae60 /drivers/net/benet/be_main.c
parent     2eee40c7f7c3734b28456169b2945e07d5ac0e2d
be2net: Add GRO support to the be2net driver. LRO is not supported anymore.
This patch removes support for INET_LRO and switches over to GRO.
Signed-off-by: Ajit Khaparde <ajitk@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
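
For reference, the receive pattern the patch moves to is sketched below in condensed form: instead of handing page fragments to an inet_lro manager, the driver borrows an skb from the NAPI context with napi_get_frags(), attaches the receive buffers as page frags, and passes the frame to the GRO engine with napi_gro_frags(). This is not the driver code itself; the function and parameter names are illustrative, only a single fragment is filled, error/VLAN handling is omitted, and the frags[] field names follow the 2009-era skb_frag_struct used in this patch.

/*
 * Condensed sketch (not be_main.c itself) of the GRO frag-receive path
 * this patch adopts; names and the single-fragment fill are illustrative.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_compl_gro_sketch(struct napi_struct *napi, struct page *page,
				unsigned int offset, unsigned int len)
{
	struct sk_buff *skb;

	/* Borrow an skb from the NAPI context instead of using an LRO manager. */
	skb = napi_get_frags(napi);
	if (!skb)
		return;	/* the real driver discards the RX completion here */

	/* Attach the received buffer directly as a page fragment. */
	skb_shinfo(skb)->frags[0].page = page;
	skb_shinfo(skb)->frags[0].page_offset = offset;
	skb_shinfo(skb)->frags[0].size = len;
	skb_shinfo(skb)->nr_frags = 1;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* Hand the frame to GRO; TCP coalescing now happens in the stack. */
	napi_gro_frags(napi);
}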
Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r--  drivers/net/benet/be_main.c | 104
1 file changed, 29 insertions(+), 75 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c43f6a119295..45df8e2d0921 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -742,7 +742,7 @@ done:
 	return;
 }
 
-/* Process the RX completion indicated by rxcp when LRO is disabled */
+/* Process the RX completion indicated by rxcp when GRO is disabled */
 static void be_rx_compl_process(struct be_adapter *adapter,
 			struct be_eth_rx_compl *rxcp)
 {
@@ -789,13 +789,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	return;
 }
 
-/* Process the RX completion indicated by rxcp when LRO is enabled */
-static void be_rx_compl_process_lro(struct be_adapter *adapter,
+/* Process the RX completion indicated by rxcp when GRO is enabled */
+static void be_rx_compl_process_gro(struct be_adapter *adapter,
 			struct be_eth_rx_compl *rxcp)
 {
 	struct be_rx_page_info *page_info;
-	struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
+	struct sk_buff *skb = NULL;
 	struct be_queue_info *rxq = &adapter->rx_obj.q;
+	struct be_eq_obj *eq_obj = &adapter->rx_eq;
 	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
 	u16 i, rxq_idx = 0, vid, j;
 
@@ -804,6 +805,12 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
 
+	skb = napi_get_frags(&eq_obj->napi);
+	if (!skb) {
+		be_rx_compl_discard(adapter, rxcp);
+		return;
+	}
+
 	remaining = pkt_size;
 	for (i = 0, j = -1; i < num_rcvd; i++) {
 		page_info = get_rx_page_info(adapter, rxq_idx);
@@ -814,13 +821,14 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 		if (i == 0 || page_info->page_offset == 0) {
 			/* First frag or Fresh page */
 			j++;
-			rx_frags[j].page = page_info->page;
-			rx_frags[j].page_offset = page_info->page_offset;
-			rx_frags[j].size = 0;
+			skb_shinfo(skb)->frags[j].page = page_info->page;
+			skb_shinfo(skb)->frags[j].page_offset =
+						page_info->page_offset;
+			skb_shinfo(skb)->frags[j].size = 0;
 		} else {
 			put_page(page_info->page);
 		}
-		rx_frags[j].size += curr_frag_len;
+		skb_shinfo(skb)->frags[j].size += curr_frag_len;
 
 		remaining -= curr_frag_len;
 		index_inc(&rxq_idx, rxq->len);
@@ -828,9 +836,14 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 	}
 	BUG_ON(j > MAX_SKB_FRAGS);
 
+	skb_shinfo(skb)->nr_frags = j + 1;
+	skb->len = pkt_size;
+	skb->data_len = pkt_size;
+	skb->truesize += pkt_size;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	if (likely(!vlanf)) {
-		lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
-				pkt_size, NULL, 0);
+		napi_gro_frags(&eq_obj->napi);
 	} else {
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
 		vid = be16_to_cpu(vid);
@@ -838,9 +851,7 @@ static void be_rx_compl_process_lro(struct be_adapter *adapter,
 		if (!adapter->vlan_grp || adapter->num_vlans == 0)
 			return;
 
-		lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
-			rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
-			vid, NULL, 0);
+		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
 	}
 
 	be_rx_stats_update(adapter, pkt_size, num_rcvd);
@@ -1183,7 +1194,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 	struct be_queue_info *eq, *q, *cq;
 	int rc;
 
-	adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	adapter->rx_eq.max_eqd = BE_MAX_EQD;
 	adapter->rx_eq.min_eqd = 0;
@@ -1305,7 +1315,7 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static inline bool do_lro(struct be_adapter *adapter,
+static inline bool do_gro(struct be_adapter *adapter,
 			struct be_eth_rx_compl *rxcp)
 {
 	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
@@ -1314,8 +1324,7 @@ static inline bool do_lro(struct be_adapter *adapter,
 	if (err)
 		drvr_stats(adapter)->be_rxcp_err++;
 
-	return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
-		false : true;
+	return (tcp_frame && !err) ? true : false;
 }
 
 int be_poll_rx(struct napi_struct *napi, int budget)
@@ -1332,16 +1341,14 @@ int be_poll_rx(struct napi_struct *napi, int budget)
 		if (!rxcp)
 			break;
 
-		if (do_lro(adapter, rxcp))
-			be_rx_compl_process_lro(adapter, rxcp);
+		if (do_gro(adapter, rxcp))
+			be_rx_compl_process_gro(adapter, rxcp);
 		else
 			be_rx_compl_process(adapter, rxcp);
 
 		be_rx_compl_reset(rxcp);
 	}
 
-	lro_flush_all(&adapter->rx_obj.lro_mgr);
-
 	/* Refill the queue */
 	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
 		be_post_rx_frags(adapter);
@@ -1656,57 +1663,6 @@ static int be_close(struct net_device *netdev)
 	return 0;
 }
 
-static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
-		void **ip_hdr, void **tcpudp_hdr,
-		u64 *hdr_flags, void *priv)
-{
-	struct ethhdr *eh;
-	struct vlan_ethhdr *veh;
-	struct iphdr *iph;
-	u8 *va = page_address(frag->page) + frag->page_offset;
-	unsigned long ll_hlen;
-
-	prefetch(va);
-	eh = (struct ethhdr *)va;
-	*mac_hdr = eh;
-	ll_hlen = ETH_HLEN;
-	if (eh->h_proto != htons(ETH_P_IP)) {
-		if (eh->h_proto == htons(ETH_P_8021Q)) {
-			veh = (struct vlan_ethhdr *)va;
-			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
-				return -1;
-
-			ll_hlen += VLAN_HLEN;
-		} else {
-			return -1;
-		}
-	}
-	*hdr_flags = LRO_IPV4;
-	iph = (struct iphdr *)(va + ll_hlen);
-	*ip_hdr = iph;
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-	*hdr_flags |= LRO_TCP;
-	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
-
-	return 0;
-}
-
-static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
-{
-	struct net_lro_mgr *lro_mgr;
-
-	lro_mgr = &adapter->rx_obj.lro_mgr;
-	lro_mgr->dev = netdev;
-	lro_mgr->features = LRO_F_NAPI;
-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
-	lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
-	lro_mgr->get_frag_header = be_get_frag_header;
-	lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
-}
-
 static struct net_device_ops be_netdev_ops = {
 	.ndo_open = be_open,
 	.ndo_stop = be_close,
@@ -1727,7 +1683,7 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
-		NETIF_F_IPV6_CSUM;
+		NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 
 	netdev->flags |= IFF_MULTICAST;
 
@@ -1737,8 +1693,6 @@ static void be_netdev_init(struct net_device *netdev)
 
 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
 
-	be_lro_init(adapter, netdev);
-
 	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
 		BE_NAPI_WEIGHT);
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,