author	Eilon Greenstein <eilong@broadcom.com>	2008-06-23 23:35:13 -0400
committer	David S. Miller <davem@davemloft.net>	2008-06-23 23:35:13 -0400
commit	755735eb3494630800f337eae33b92bab363f112 (patch)
tree	7aaa4d813e2c2d324570e66c4be56007f7b49490 /drivers
parent	7a9b25577c8a06d998fb11b28bf8229aa9623205 (diff)
bnx2x: Re-factor Tx code
Add support for IPv6 TSO.

Re-factor the Tx code into smaller functions to increase readability, and add
linearization code for the case where a packet is too fragmented for the
microcode to handle.

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/bnx2x_main.c	444
1 file changed, 304 insertions(+), 140 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fabde5555e32..e97fe8cddac9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8302,10 +8302,14 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
 
 static int bnx2x_set_tso(struct net_device *dev, u32 data)
 {
-	if (data)
+	if (data) {
 		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-	else
+		dev->features |= NETIF_F_TSO6;
+	} else {
 		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
+		dev->features &= ~NETIF_F_TSO6;
+	}
+
 	return 0;
 }
 
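Context for this hunk: advertising NETIF_F_TSO6 is what lets the stack hand the driver unsegmented IPv6 TCP packets in the first place. Roughly, the core only passes a GSO skb through when the device claims the matching feature bit; NETIF_F_TSO6 is SKB_GSO_TCPV6 shifted into the feature word. A simplified sketch of that gating, modelled on skb_gso_ok() of this kernel vintage (an assumption, not copied from net/core):

	/* Sketch of how the core gates the GSO handoff (cf. skb_gso_ok());
	 * simplified and assumed, not the exact core code.
	 */
	static int device_can_gso(const struct sk_buff *skb, int dev_features)
	{
		int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;

		return (dev_features & feature) == feature;
	}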
@@ -8339,10 +8343,6 @@ static void bnx2x_self_test(struct net_device *dev,
 		buf[0] = 1;
 		etest->flags |= ETH_TEST_FL_FAILED;
 	}
-
-#ifdef BNX2X_EXTRA_DEBUG
-	bnx2x_panic_dump(bp);
-#endif
 }
 
 static const struct {
@@ -8545,7 +8545,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_rx_csum		= bnx2x_get_rx_csum,
 	.set_rx_csum		= bnx2x_set_rx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
 	.set_flags		= bnx2x_set_flags,
 	.get_flags		= ethtool_op_get_flags,
 	.get_sg			= ethtool_op_get_sg,
@@ -8651,9 +8651,180 @@ poll_panic:
 	return work_done;
 }
 
-/* Called with netif_tx_lock.
+
+/* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM)
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+				   struct bnx2x_fastpath *fp,
+				   struct eth_tx_bd **tx_bd, u16 hlen,
+				   u16 bd_prod, int nbd)
+{
+	struct eth_tx_bd *h_tx_bd = *tx_bd;
+	struct eth_tx_bd *d_tx_bd;
+	dma_addr_t mapping;
+	int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+	/* first fix first BD */
+	h_tx_bd->nbd = cpu_to_le16(nbd);
+	h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
+	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+	   h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+	/* now get a new data BD
+	 * (after the pbd) and fill it */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	d_tx_bd = &fp->tx_desc_ring[bd_prod];
+
+	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+	d_tx_bd->vlan = 0;
+	/* this marks the BD as one that has no individual mapping
+	 * the FW ignores this flag in a BD not marked start
+	 */
+	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
+	DP(NETIF_MSG_TX_QUEUED,
+	   "TSO split data size is %d (%x:%x)\n",
+	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+	/* update tx_bd for marking the last BD flag */
+	*tx_bd = d_tx_bd;
+
+	return bd_prod;
+}
+
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+	if (fix > 0)
+		csum = (u16) ~csum_fold(csum_sub(csum,
+				csum_partial(t_header - fix, fix, 0)));
+
+	else if (fix < 0)
+		csum = (u16) ~csum_fold(csum_add(csum,
+				csum_partial(t_header, -fix, 0)));
+
+	return swab16(csum);
+}
+
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+	u32 rc;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		rc = XMIT_PLAIN;
+
+	else {
+		if (skb->protocol == ntohs(ETH_P_IPV6)) {
+			rc = XMIT_CSUM_V6;
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+
+		} else {
+			rc = XMIT_CSUM_V4;
+			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+		}
+	}
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+		rc |= XMIT_GSO_V4;
+
+	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+		rc |= XMIT_GSO_V6;
+
+	return rc;
+}
+
+/* check if packet requires linearization (packet is too fragmented) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+			     u32 xmit_type)
+{
+	int to_copy = 0;
+	int hlen = 0;
+	int first_bd_sz = 0;
+
+	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+		if (xmit_type & XMIT_GSO) {
+			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+			/* Check if LSO packet needs to be copied:
+			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+			int wnd_size = MAX_FETCH_BD - 3;
+			/* Number of windows to check */
+			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+			int wnd_idx = 0;
+			int frag_idx = 0;
+			u32 wnd_sum = 0;
+
+			/* Headers length */
+			hlen = (int)(skb_transport_header(skb) - skb->data) +
+				tcp_hdrlen(skb);
+
+			/* Amount of data (w/o headers) on linear part of SKB */
+			first_bd_sz = skb_headlen(skb) - hlen;
+
+			wnd_sum = first_bd_sz;
+
+			/* Calculate the first sum - it's special */
+			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+				wnd_sum +=
+					skb_shinfo(skb)->frags[frag_idx].size;
+
+			/* If there was data on linear skb data - check it */
+			if (first_bd_sz > 0) {
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					goto exit_lbl;
+				}
+
+				wnd_sum -= first_bd_sz;
+			}
+
+			/* Others are easier: run through the frag list and
+			   check all windows */
+			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+				wnd_sum +=
+		       skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					break;
+				}
+				wnd_sum -=
+					skb_shinfo(skb)->frags[wnd_idx].size;
+			}
+
+		} else {
+			/* in non-LSO too fragmented packet should always
+			   be linearized */
+			to_copy = 1;
+		}
+	}
+
+exit_lbl:
+	if (unlikely(to_copy))
+		DP(NETIF_MSG_TX_QUEUED,
+		   "Linearization IS REQUIRED for %s packet. "
+		   "num_frags %d  hlen %d  first_bd_sz %d\n",
+		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+	return to_copy;
+}
+
+/* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
- * netif_wake_queue().
+ * netif_wake_queue()
  */
 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
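The XMIT_* classification flags consumed by the new helpers are defined in bnx2x.h, which this diff does not touch. For reading the hunk above, a plausible set of definitions, inferred from how the flags are tested and combined here (the authoritative values live in the header and may differ):

	/* Assumed values, inferred from usage in bnx2x_xmit_type() and
	 * bnx2x_start_xmit(); see bnx2x.h for the real definitions.
	 */
	#define XMIT_PLAIN	0
	#define XMIT_CSUM_V4	0x1
	#define XMIT_CSUM_V6	0x2
	#define XMIT_CSUM_TCP	0x4
	#define XMIT_CSUM	(XMIT_CSUM_V4 | XMIT_CSUM_V6)
	#define XMIT_GSO_V4	0x8
	#define XMIT_GSO_V6	0x10
	#define XMIT_GSO	(XMIT_GSO_V4 | XMIT_GSO_V6)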
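The window scan in bnx2x_pkt_req_lin() enforces a firmware rule: every run of wnd_size consecutive BDs must carry at least one MSS of payload between them, otherwise the chip cannot assemble a segment without fetching more BDs than MAX_FETCH_BD allows. A standalone toy version of the same scan, with an invented MAX_FETCH_BD and made-up frag sizes, purely to make the arithmetic concrete (not driver code):

	/* Toy model of the bnx2x_pkt_req_lin() sliding-window check.
	 * MAX_FETCH_BD and all sizes here are illustrative, not the driver's.
	 */
	#include <stdio.h>

	#define MAX_FETCH_BD 13

	static int needs_linearization(int first_bd_sz, const int *frag,
				       int nr_frags, int lso_mss)
	{
		int wnd_size = MAX_FETCH_BD - 3; /* headers BD + PBD + last BD */
		int wnd_sum = first_bd_sz;
		int i;

		if (nr_frags < wnd_size)
			return 0;	/* few enough BDs for the FW */

		/* first window: linear payload plus the first wnd_size-1 frags */
		for (i = 0; i < wnd_size - 1; i++)
			wnd_sum += frag[i];

		if (first_bd_sz > 0) {
			if (wnd_sum < lso_mss)
				return 1;
			wnd_sum -= first_bd_sz;
		}

		/* slide the window one frag at a time */
		for (i = 0; i <= nr_frags - wnd_size; i++) {
			wnd_sum += frag[i + wnd_size - 1];
			if (wnd_sum < lso_mss)
				return 1;
			wnd_sum -= frag[i];
		}
		return 0;
	}

	int main(void)
	{
		/* 12 frags of 100 bytes: the first window (200 bytes of linear
		 * payload plus nine frags) holds only 1100 bytes, less than a
		 * 1460-byte MSS, so this skb would have to be linearized.
		 */
		int frag[12] = {100, 100, 100, 100, 100, 100,
				100, 100, 100, 100, 100, 100};

		printf("linearize: %d\n",
		       needs_linearization(200, frag, 12, 1460));
		return 0;
	}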
@@ -8663,17 +8834,21 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct eth_tx_bd *tx_bd;
 	struct eth_tx_parse_bd *pbd = NULL;
 	u16 pkt_prod, bd_prod;
-	int nbd, fp_index = 0;
+	int nbd, fp_index;
 	dma_addr_t mapping;
+	u32 xmit_type = bnx2x_xmit_type(bp, skb);
+	int vlan_off = (bp->e1hov ? 4 : 0);
+	int i;
+	u8 hlen = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return NETDEV_TX_BUSY;
 #endif
 
-	fp_index = smp_processor_id() % (bp->num_queues);
-
+	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
+
 	if (unlikely(bnx2x_tx_avail(bp->fp) <
 				(skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
@@ -8682,20 +8857,37 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
+	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
+	   "  gso type %x  xmit_type %x\n",
+	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+	/* First, check if we need to linearize the skb
+	   (due to FW restrictions) */
+	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+		/* Statistics of linearization */
+		bp->lin_cnt++;
+		if (skb_linearize(skb) != 0) {
+			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+			   "silently dropping this SKB\n");
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+	}
+
 	/*
-	This is a bit ugly. First we use one BD which we mark as start,
+	Please read carefully. First we use one BD which we mark as start,
 	then for TSO or xsum we have a parsing info BD,
-	and only then we have the rest of the TSO bds.
+	and only then we have the rest of the TSO BDs.
 	(don't forget to mark the last one as last,
 	and to unmap only AFTER you write to the BD ...)
-	I would like to thank DovH for this mess.
+	And above all, all pbd sizes are in words - NOT DWORDS!
 	*/
 
 	pkt_prod = fp->tx_pkt_prod++;
-	bd_prod = fp->tx_bd_prod;
-	bd_prod = TX_BD(bd_prod);
+	bd_prod = TX_BD(fp->tx_bd_prod);
 
-	/* get a tx_buff and first bd */
+	/* get a tx_buf and first BD */
 	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
 	tx_bd = &fp->tx_desc_ring[bd_prod];
@@ -8704,65 +8896,80 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
 	tx_bd->general_data |= 1; /* header nbd */
 
-	/* remember the first bd of the packet */
-	tx_buf->first_bd = bd_prod;
+	/* remember the first BD of the packet */
+	tx_buf->first_bd = fp->tx_bd_prod;
+	tx_buf->skb = skb;
 
 	DP(NETIF_MSG_TX_QUEUED,
 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
 	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		struct iphdr *iph = ip_hdr(skb);
-		u8 len;
+	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
+		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
+		vlan_off += 4;
+	} else
+		tx_bd->vlan = cpu_to_le16(pkt_prod);
 
-		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
+	if (xmit_type) {
 
-		/* turn on parsing and get a bd */
+		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
-		len = ((u8 *)iph - (u8 *)skb->data) / 2;
+
+		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
+	}
+
+	if (xmit_type & XMIT_CSUM) {
+		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
 
 		/* for now NS flag is not used in Linux */
-		pbd->global_data = (len |
+		pbd->global_data = (hlen |
 				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
 				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
-		pbd->ip_hlen = ip_hdrlen(skb) / 2;
-		pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
-		if (iph->protocol == IPPROTO_TCP) {
-			struct tcphdr *th = tcp_hdr(skb);
 
-			tx_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_TCP_CSUM;
-			pbd->tcp_flags = pbd_tcp_flags(skb);
-			pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
-			pbd->tcp_pseudo_csum = swab16(th->check);
+		pbd->ip_hlen = (skb_transport_header(skb) -
+				skb_network_header(skb)) / 2;
 
-		} else if (iph->protocol == IPPROTO_UDP) {
-			struct udphdr *uh = udp_hdr(skb);
+		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
 
+		pbd->total_hlen = cpu_to_le16(hlen);
+		hlen = hlen*2 - vlan_off;
+
+		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
+
+		if (xmit_type & XMIT_CSUM_V4)
 			tx_bd->bd_flags.as_bitfield |=
-						ETH_TX_BD_FLAGS_TCP_CSUM;
-			pbd->total_hlen += cpu_to_le16(4);
+						ETH_TX_BD_FLAGS_IP_CSUM;
+		else
+			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
+
+		if (xmit_type & XMIT_CSUM_TCP) {
+			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+		} else {
+			s8 fix = SKB_CS_OFF(skb); /* signed! */
+
 			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
-			pbd->cs_offset = 5; /* 10 >> 1 */
-			pbd->tcp_pseudo_csum = 0;
-			/* HW bug: we need to subtract 10 bytes before the
-			 * UDP header from the csum
-			 */
-			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
-				csum_partial(((u8 *)(uh)-10), 10, 0)));
-		}
-	}
+			pbd->cs_offset = fix / 2;
 
-	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
-		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
-		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
-	} else {
-		tx_bd->vlan = cpu_to_le16(pkt_prod);
+			DP(NETIF_MSG_TX_QUEUED,
+			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
+			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
+			   SKB_CS(skb));
+
+			/* HW bug: fixup the CSUM */
+			pbd->tcp_pseudo_csum =
+				bnx2x_csum_fix(skb_transport_header(skb),
+					       SKB_CS(skb), fix);
+
+			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+			   pbd->tcp_pseudo_csum);
+		}
 	}
 
 	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb->len, PCI_DMA_TODEVICE);
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
 
 	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
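The bnx2x_csum_fix() call above corrects for hardware that starts checksumming at a fixed point rather than at the skb's csum_start: when SKB_CS_OFF(skb) is positive, the stack's partial checksum covers `fix` bytes before the transport header that have to be subtracted back out in one's-complement arithmetic (and added back, when fix is negative). The identity it relies on can be shown in plain C, with naive stand-ins for the kernel's csum helpers (illustrative only):

	/* One's-complement demo of the bnx2x_csum_fix() idea: the checksum over
	 * [start-fix, end) minus the checksum over [start-fix, start) equals
	 * the checksum over [start, end).  Plain C stand-ins, not kernel code.
	 */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t csum_partial(const uint8_t *buf, int len, uint32_t sum)
	{
		int i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
		if (len & 1)
			sum += (uint32_t)(buf[len - 1] << 8);
		return sum;
	}

	static uint16_t csum_fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* one's-complement subtraction: a - b == a + ~b */
	static uint32_t csum_sub(uint32_t a, uint32_t b)
	{
		return csum_fold(a) + (uint16_t)~csum_fold(b);
	}

	int main(void)
	{
		uint8_t pkt[8] = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0};
		int fix = 4;	/* bytes before the "transport header" */

		uint16_t whole = csum_fold(csum_partial(pkt, 8, 0));
		uint16_t fixed = csum_fold(csum_sub(csum_partial(pkt, 8, 0),
						    csum_partial(pkt, fix, 0)));
		uint16_t want  = csum_fold(csum_partial(pkt + fix, 8 - fix, 0));

		/* fixed and want agree (0x79ad for this buffer) */
		printf("whole %04x  fixed %04x  want %04x\n", whole, fixed, want);
		return 0;
	}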
@@ -8771,13 +8978,12 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 
 	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
-	   "  nbytes %d  flags %x  vlan %u\n",
-	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
-	   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
+	   "  nbytes %d  flags %x  vlan %x\n",
+	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
+	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
+	   le16_to_cpu(tx_bd->vlan));
 
-	if (skb_shinfo(skb)->gso_size &&
-	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
-		int hlen = 2 * le16_to_cpu(pbd->total_hlen);
+	if (xmit_type & XMIT_GSO) {
 
 		DP(NETIF_MSG_TX_QUEUED,
 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
@@ -8786,99 +8992,60 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 
-		if (tx_bd->nbytes > cpu_to_le16(hlen)) {
-			/* we split the first bd into headers and data bds
-			 * to ease the pain of our fellow microcode engineers
-			 * we use one mapping for both bds
-			 * So far this has only been observed to happen
-			 * in Other Operating Systems(TM)
-			 */
-
-			/* first fix first bd */
-			nbd++;
-			tx_bd->nbd = cpu_to_le16(nbd);
-			tx_bd->nbytes = cpu_to_le16(hlen);
-
-			/* we only print this as an error
-			 * because we don't think this will ever happen.
-			 */
-			BNX2X_ERR("TSO split header size is %d (%x:%x)"
-				  " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
-				  tx_bd->addr_lo, tx_bd->nbd);
-
-			/* now get a new data bd
-			 * (after the pbd) and fill it */
-			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-			tx_bd = &fp->tx_desc_ring[bd_prod];
-
-			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
-			tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
-			tx_bd->vlan = cpu_to_le16(pkt_prod);
-			/* this marks the bd
-			 * as one that has no individual mapping
-			 * the FW ignores this flag in a bd not marked start
-			 */
-			tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
-			DP(NETIF_MSG_TX_QUEUED,
-			   "TSO split data size is %d (%x:%x)\n",
-			   tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
-		}
-
-		if (!pbd) {
-			/* supposed to be unreached
-			 * (and therefore not handled properly...)
-			 */
-			BNX2X_ERR("LSO with no PBD\n");
-			BUG();
-		}
+		if (unlikely(skb_headlen(skb) > hlen))
+			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
+						 bd_prod, ++nbd);
 
 		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
-		pbd->ip_id = swab16(ip_hdr(skb)->id);
-		pbd->tcp_pseudo_csum =
+		pbd->tcp_flags = pbd_tcp_flags(skb);
+
+		if (xmit_type & XMIT_GSO_V4) {
+			pbd->ip_id = swab16(ip_hdr(skb)->id);
+			pbd->tcp_pseudo_csum =
 				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 							  ip_hdr(skb)->daddr,
 							  0, IPPROTO_TCP, 0));
+
+		} else
+			pbd->tcp_pseudo_csum =
+				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+							&ipv6_hdr(skb)->daddr,
+							0, IPPROTO_TCP, 0));
+
 		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
 	}
 
-	{
-		int i;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+		tx_bd = &fp->tx_desc_ring[bd_prod];
 
-			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
-			tx_bd = &fp->tx_desc_ring[bd_prod];
+		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
+				       frag->size, PCI_DMA_TODEVICE);
 
-			mapping = pci_map_page(bp->pdev, frag->page,
-					       frag->page_offset,
-					       frag->size, PCI_DMA_TODEVICE);
+		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+		tx_bd->nbytes = cpu_to_le16(frag->size);
+		tx_bd->vlan = cpu_to_le16(pkt_prod);
+		tx_bd->bd_flags.as_bitfield = 0;
 
-			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-			tx_bd->nbytes = cpu_to_le16(frag->size);
-			tx_bd->vlan = cpu_to_le16(pkt_prod);
-			tx_bd->bd_flags.as_bitfield = 0;
-			DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
-			   "  addr (%x:%x)  nbytes %d  flags %x\n",
-			   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
-			   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
-		} /* for */
+		DP(NETIF_MSG_TX_QUEUED,
+		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
+		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
+		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
 	}
 
-	/* now at last mark the bd as the last bd */
+	/* now at last mark the BD as the last BD */
 	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
 
 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
 	   tx_bd, tx_bd->bd_flags.as_bitfield);
 
-	tx_buf->skb = skb;
-
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
-	/* now send a tx doorbell, counting the next bd
+	/* now send a tx doorbell, counting the next BD
 	 * if the packet contains or ends with it
 	 */
 	if (TX_BD_POFF(bd_prod) < nbd)
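Both branches above seed pbd->tcp_pseudo_csum with the complement of a pseudo-header checksum computed over addresses and protocol only, with a zero length field; the ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag set in this hunk tells the firmware as much, and the firmware folds in the proper length for each segment it cuts. A userspace stand-in for the IPv4 case, with made-up addresses (illustrative, not csum_tcpudp_magic() itself):

	/* Pseudo-header checksum with the length term left at zero, as the
	 * LSO path seeds it.  Addresses and names are made up.
	 */
	#include <stdio.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	static uint16_t pseudo_csum_no_len(uint32_t saddr, uint32_t daddr,
					   uint8_t proto)
	{
		uint32_t sum = 0;

		sum += (saddr >> 16) + (saddr & 0xffff);
		sum += (daddr >> 16) + (daddr & 0xffff);
		sum += proto;	/* length term deliberately omitted */
		return fold(sum);
	}

	int main(void)
	{
		/* 192.0.2.1 -> 192.0.2.2, IPPROTO_TCP == 6 */
		uint16_t csum = pseudo_csum_no_len(0xc0000201, 0xc0000202, 6);

		/* the driver stores the complement (byte-swapped for the FW) */
		printf("seed written to the PBD: %04x\n", (uint16_t)~csum);
		return 0;
	}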
@@ -8890,20 +9057,20 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8890 " tcp_flags %x xsum %x seq %u hlen %u\n", 9057 " tcp_flags %x xsum %x seq %u hlen %u\n",
8891 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, 9058 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
8892 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, 9059 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
8893 pbd->tcp_send_seq, pbd->total_hlen); 9060 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
8894 9061
8895 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod); 9062 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
8896 9063
8897 fp->hw_tx_prods->bds_prod = 9064 fp->hw_tx_prods->bds_prod =
8898 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd); 9065 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
8899 mb(); /* FW restriction: must not reorder writing nbd and packets */ 9066 mb(); /* FW restriction: must not reorder writing nbd and packets */
8900 fp->hw_tx_prods->packets_prod = 9067 fp->hw_tx_prods->packets_prod =
8901 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1); 9068 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8902 DOORBELL(bp, fp_index, 0); 9069 DOORBELL(bp, FP_IDX(fp), 0);
8903 9070
8904 mmiowb(); 9071 mmiowb();
8905 9072
8906 fp->tx_bd_prod = bd_prod; 9073 fp->tx_bd_prod += nbd;
8907 dev->trans_start = jiffies; 9074 dev->trans_start = jiffies;
8908 9075
8909 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 9076 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
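The mb() between the two producer writes is a firmware contract, per the in-line comment: the chip may sample packets_prod and bds_prod independently, and must never see the packet count advance before the BDs backing it are published. The same ordering, modelled standalone with C11 atomics (names, types, and the seq_cst fence are illustrative stand-ins for the driver's mb(), not driver code):

	/* Standalone model of the two-producer publish order. */
	#include <stdatomic.h>
	#include <stdint.h>

	struct tx_prods {
		_Atomic uint16_t bds_prod;	/* BDs posted so far */
		_Atomic uint32_t packets_prod;	/* packets posted so far */
	};

	static void publish(struct tx_prods *p, uint16_t nbd)
	{
		atomic_fetch_add_explicit(&p->bds_prod, nbd,
					  memory_order_relaxed);
		/* the reader must not observe the new packet before its BDs */
		atomic_thread_fence(memory_order_seq_cst);
		atomic_fetch_add_explicit(&p->packets_prod, 1,
					  memory_order_relaxed);
	}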
@@ -9331,10 +9498,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
 #endif
 	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
-
-	bp->timer_interval = HZ;
-	bp->current_interval = (poll ? poll : HZ);
-
+	dev->features |= NETIF_F_TSO6;
 
 	return 0;
 