path: root/drivers/net/ixgbe/ixgbe_main.c
author    Alexander Duyck <alexander.h.duyck@intel.com>  2010-11-16 22:26:47 -0500
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2010-11-16 22:26:47 -0500
commit    8ad494b0e59950e2b4e587c32cb67a2452795ea0 (patch)
tree      0a02398ca7563ef478a9ca8fde519daa86522c82 /drivers/net/ixgbe/ixgbe_main.c
parent    4c0ec6544a0cd5e3eed08df2c14cf98185098abe (diff)
ixgbe: move GSO segments and byte count processing into ixgbe_tx_map
This change simplifies the work being done by the TX interrupt handler and
pushes it into the tx_map call. This allows for fewer cache misses since the
TX cleanup now accesses almost none of the skb members.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
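In outline, the patch moves the GSO accounting from the cleanup path to the map
path. The sketch below is illustrative only, not the driver's code: the struct
and function names (tx_buffer_sketch, sketch_account_at_map,
sketch_account_at_clean) are invented for this note. The idea is to compute the
segment and byte counts once at map time, while the skb and its shared info are
already in cache, and store two integers next to the DMA state so that TX
cleanup never has to dereference the skb for statistics.

#include <linux/skbuff.h>

struct tx_buffer_sketch {
	struct sk_buff *skb;
	unsigned int bytecount;	/* wire bytes, incl. replicated headers */
	u16 gso_segs;		/* packets this buffer will produce */
};

/* map time: skb and skb_shinfo() cache lines are hot here anyway */
static void sketch_account_at_map(struct tx_buffer_sketch *tb,
				  struct sk_buff *skb, u8 hdr_len)
{
	u16 segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	tb->skb = skb;
	tb->gso_segs = segs;
	/* every segment after the first re-sends hdr_len header bytes */
	tb->bytecount = skb->len + (segs - 1) * hdr_len;
}

/* cleanup time: two integer loads, no skb cache-line misses */
static void sketch_account_at_clean(const struct tx_buffer_sketch *tb,
				    unsigned long *total_bytes,
				    unsigned long *total_packets)
{
	*total_bytes += tb->bytecount;
	*total_packets += tb->gso_segs;
}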
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 57
 1 file changed, 25 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 45d988741fe9..480f0b0f038a 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -749,45 +749,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		bool cleaned = false;
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
-			struct sk_buff *skb;
 			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+
+			tx_desc->wb.status = 0;
 			cleaned = (i == eop);
-			skb = tx_buffer_info->skb;
 
-			if (cleaned && skb) {
-				unsigned int segs, bytecount;
-				unsigned int hlen = skb_headlen(skb);
-
-				/* gso_segs is currently only valid for tcp */
-				segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-				/* adjust for FCoE Sequence Offload */
-				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-				    && skb_is_gso(skb)
-				    && vlan_get_protocol(skb) ==
-				    htons(ETH_P_FCOE)) {
-					hlen = skb_transport_offset(skb) +
-					    sizeof(struct fc_frame_header) +
-					    sizeof(struct fcoe_crc_eof);
-					segs = DIV_ROUND_UP(skb->len - hlen,
-					    skb_shinfo(skb)->gso_size);
-				}
-#endif /* IXGBE_FCOE */
-				/* multiply data chunks by size of headers */
-				bytecount = ((segs - 1) * hlen) + skb->len;
-				total_packets += segs;
-				total_bytes += bytecount;
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+
+			if (cleaned && tx_buffer_info->skb) {
+				total_bytes += tx_buffer_info->bytecount;
+				total_packets += tx_buffer_info->gso_segs;
 			}
 
 			ixgbe_unmap_and_free_tx_resource(adapter,
 							 tx_buffer_info);
-
-			tx_desc->wb.status = 0;
-
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
 		}
 
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -6015,7 +5993,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			struct ixgbe_ring *tx_ring,
 			struct sk_buff *skb, u32 tx_flags,
-			unsigned int first)
+			unsigned int first, const u8 hdr_len)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6024,6 +6002,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+	unsigned int bytecount = skb->len;
+	u16 gso_segs = 1;
 
 	i = tx_ring->next_to_use;
 
@@ -6093,6 +6073,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 		break;
 	}
 
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+	/* adjust for FCoE Sequence Offload */
+	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+					skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+	bytecount += (gso_segs - 1) * hdr_len;
+
+	/* multiply data chunks by size of headers */
+	tx_ring->tx_buffer_info[i].bytecount = bytecount;
+	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -6402,7 +6395,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
-	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
 		if (tx_ring->atr_sample_rate) {
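
The bytecount and gso_segs fields written by ixgbe_tx_map above are not
declared in this file. Since this diffstat is limited to ixgbe_main.c, the
companion change to struct ixgbe_tx_buffer in drivers/net/ixgbe/ixgbe.h is not
shown; a rough sketch of what it is assumed to add follows (the field names
match their use above, but the neighboring fields and their ordering are a
guess):

/* Assumed companion change in drivers/net/ixgbe/ixgbe.h (not part of this
 * file-limited diff); layout is illustrative, not verified against the tree.
 */
struct ixgbe_tx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned long time_stamp;
	u16 length;
	u16 next_to_watch;
	unsigned int bytecount;	/* new: set in ixgbe_tx_map(), read in TX cleanup */
	u16 gso_segs;		/* new: set in ixgbe_tx_map(), read in TX cleanup */
	u8 mapped_as_page;
};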