about summary refs log tree commit diff stats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2011-05-27 01:31:52 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-06-25 03:06:51 -0400
commita535c30e9e98d201089503a0ffa0093cba16e796 (patch)
tree5e15e3c107edf07b3d6e2d4dc834a43cdf75b52e /drivers/net/ixgbe/ixgbe_main.c
parent897ab15606ce896b6a574a263beb51cbfb43f041 (diff)
ixgbe: Update method used for determining descriptor count for an skb
This patch updates the current methods used for determining if we have enough space to transmit a given skb. The current method is quite wasteful as it has us go through and determine how each page is going to be broken up. That only needs to be done if pages are larger than our maximum data per TXD. As such I have wrapped that in a page size check. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c55
1 file changed, 26 insertions, 29 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 00e60c5ab27..305e1e4f80b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -772,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
772 return ret; 772 return ret;
773} 773}
774 774
775#define IXGBE_MAX_TXD_PWR 14
776#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
777
778/* Tx Descriptors needed, worst case */
779#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
780 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
781#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
782 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
783
784/** 775/**
785 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout 776 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
786 * @adapter: driver private struct 777 * @adapter: driver private struct
@@ -6832,14 +6823,34 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6832 struct ixgbe_adapter *adapter, 6823 struct ixgbe_adapter *adapter,
6833 struct ixgbe_ring *tx_ring) 6824 struct ixgbe_ring *tx_ring)
6834{ 6825{
6835 unsigned int tx_flags = 0;
6836 int tso; 6826 int tso;
6837 u16 count = 0; 6827 u32 tx_flags = 0;
6828#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6829 unsigned short f;
6830#endif
6838 u16 first; 6831 u16 first;
6839 unsigned int f; 6832 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6840 __be16 protocol; 6833 __be16 protocol;
6841 u8 hdr_len = 0; 6834 u8 hdr_len = 0;
6842 6835
6836 /*
6837 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
6838 * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
6839 * + 2 desc gap to keep tail from touching head,
6840 * + 1 desc for context descriptor,
6841 * otherwise try next time
6842 */
6843#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
6844 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6845 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6846#else
6847 count += skb_shinfo(skb)->nr_frags;
6848#endif
6849 if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
6850 tx_ring->tx_stats.tx_busy++;
6851 return NETDEV_TX_BUSY;
6852 }
6853
6843 protocol = vlan_get_protocol(skb); 6854 protocol = vlan_get_protocol(skb);
6844 6855
6845 if (vlan_tx_tag_present(skb)) { 6856 if (vlan_tx_tag_present(skb)) {
@@ -6863,25 +6874,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6863 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6874 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6864 (protocol == htons(ETH_P_FCOE))) 6875 (protocol == htons(ETH_P_FCOE)))
6865 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6876 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6866#endif
6867
6868 /* four things can cause us to need a context descriptor */
6869 if (skb_is_gso(skb) ||
6870 (skb->ip_summed == CHECKSUM_PARTIAL) ||
6871 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
6872 (tx_flags & IXGBE_TX_FLAGS_FCOE))
6873 count++;
6874
6875 count += TXD_USE_COUNT(skb_headlen(skb));
6876 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6877 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6878
6879 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
6880 tx_ring->tx_stats.tx_busy++;
6881 return NETDEV_TX_BUSY;
6882 }
6883 6877
6878#endif
6879 /* record the location of the first descriptor for this packet */
6884 first = tx_ring->next_to_use; 6880 first = tx_ring->next_to_use;
6881
6885 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 6882 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6886#ifdef IXGBE_FCOE 6883#ifdef IXGBE_FCOE
6887 /* setup tx offload for FCoE */ 6884 /* setup tx offload for FCoE */