diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2012-05-11 04:32:40 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2012-07-17 05:47:41 -0400 |
commit | 3595990a9ccc1b819bfe801940eb05f8a84f253e (patch) | |
tree | 8d7af4a336dad7ffbd7bb2a5fa4104d984fafeb8 | |
parent | e2c28ce76001f01fefb255e0ce1fd6819a2ad1ea (diff) |
ixgbevf: Cleanup accounting for space needed at start of xmit_frame
This change cleans up the accounting needed at the start of xmit_frame so
that we can avoid doing too much work to determine how many descriptors we
will need.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 46 |
1 file changed, 24 insertions, 22 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75af1920b0f7..855bb21824fe 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -175,10 +175,8 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, | |||
175 | #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) | 175 | #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) |
176 | 176 | ||
177 | /* Tx Descriptors needed, worst case */ | 177 | /* Tx Descriptors needed, worst case */ |
178 | #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ | 178 | #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) |
179 | (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) | 179 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) |
180 | #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ | ||
181 | MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ | ||
182 | 180 | ||
183 | static void ixgbevf_tx_timeout(struct net_device *netdev); | 181 | static void ixgbevf_tx_timeout(struct net_device *netdev); |
184 | 182 | ||
@@ -2932,33 +2930,37 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2932 | unsigned int tx_flags = 0; | 2930 | unsigned int tx_flags = 0; |
2933 | u8 hdr_len = 0; | 2931 | u8 hdr_len = 0; |
2934 | int r_idx = 0, tso; | 2932 | int r_idx = 0, tso; |
2935 | int count = 0; | 2933 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); |
2936 | 2934 | #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD | |
2937 | unsigned int f; | 2935 | unsigned short f; |
2936 | #endif | ||
2938 | 2937 | ||
2939 | tx_ring = &adapter->tx_ring[r_idx]; | 2938 | tx_ring = &adapter->tx_ring[r_idx]; |
2940 | 2939 | ||
2940 | /* | ||
2941 | * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, | ||
2942 | * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, | ||
2943 | * + 2 desc gap to keep tail from touching head, | ||
2944 | * + 1 desc for context descriptor, | ||
2945 | * otherwise try next time | ||
2946 | */ | ||
2947 | #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD | ||
2948 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
2949 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); | ||
2950 | #else | ||
2951 | count += skb_shinfo(skb)->nr_frags; | ||
2952 | #endif | ||
2953 | if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count + 3)) { | ||
2954 | adapter->tx_busy++; | ||
2955 | return NETDEV_TX_BUSY; | ||
2956 | } | ||
2957 | |||
2941 | if (vlan_tx_tag_present(skb)) { | 2958 | if (vlan_tx_tag_present(skb)) { |
2942 | tx_flags |= vlan_tx_tag_get(skb); | 2959 | tx_flags |= vlan_tx_tag_get(skb); |
2943 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 2960 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
2944 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 2961 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
2945 | } | 2962 | } |
2946 | 2963 | ||
2947 | /* four things can cause us to need a context descriptor */ | ||
2948 | if (skb_is_gso(skb) || | ||
2949 | (skb->ip_summed == CHECKSUM_PARTIAL) || | ||
2950 | (tx_flags & IXGBE_TX_FLAGS_VLAN)) | ||
2951 | count++; | ||
2952 | |||
2953 | count += TXD_USE_COUNT(skb_headlen(skb)); | ||
2954 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
2955 | count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); | ||
2956 | |||
2957 | if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { | ||
2958 | adapter->tx_busy++; | ||
2959 | return NETDEV_TX_BUSY; | ||
2960 | } | ||
2961 | |||
2962 | first = tx_ring->next_to_use; | 2964 | first = tx_ring->next_to_use; |
2963 | 2965 | ||
2964 | if (skb->protocol == htons(ETH_P_IP)) | 2966 | if (skb->protocol == htons(ETH_P_IP)) |