author    Eric Dumazet <edumazet@google.com>    2014-10-07 16:30:23 -0400
committer David S. Miller <davem@davemloft.net> 2014-10-08 16:03:32 -0400
commit    4567dc10934292a3171f98d28817d58127b73b72 (patch)
tree      9ad62d0d025d2dfa1d792b87ac5dc5eb1b1f5f29
parent    44783d87512999fe1450ff2cdf26c1ddc3fa5eea (diff)
i40e: skb->xmit_more support
Supporting skb->xmit_more in i40e is straightforward: we need to move
the i40e_maybe_stop_tx() call so that netif_xmit_stopped() is tested
correctly before taking the decision not to kick the NIC.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
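For context, the doorbell-batching pattern this patch applies looks roughly as follows in a generic ndo_start_xmit path. This is a minimal sketch, not the i40e code itself: the example_* names, the struct example_ring layout, and EXAMPLE_DESC_NEEDED are hypothetical stand-ins, while netif_xmit_stopped(), netdev_get_tx_queue() and writel() are the real kernel APIs the patch uses. The key ordering is that the stop check runs before the doorbell decision: if the queue was just stopped, no further xmit calls will arrive to flush the ring, so the tail must be written even when skb->xmit_more is set.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

/* Hypothetical ring layout, for illustration only. */
struct example_ring {
	struct net_device *netdev;
	void __iomem *tail;		/* doorbell register */
	u16 queue_index;
	u16 next_to_use;
};

/* Hypothetical helpers assumed to exist elsewhere in the driver. */
void example_queue_descriptors(struct example_ring *ring, struct sk_buff *skb);
int example_maybe_stop_tx(struct example_ring *ring, int size);
#define EXAMPLE_DESC_NEEDED	8	/* hypothetical worst case */

static netdev_tx_t example_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev,
				      struct example_ring *ring)
{
	example_queue_descriptors(ring, skb);

	/* Run the stop check first: it may stop the subqueue when the
	 * ring is nearly full, and netif_xmit_stopped() below observes
	 * that to force a doorbell despite xmit_more.
	 */
	example_maybe_stop_tx(ring, EXAMPLE_DESC_NEEDED);

	/* Kick the NIC only if the stack has no more packets queued
	 * behind this one, or if the queue has just been stopped.
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(netdev,
						   ring->queue_index)))
		writel(ring->next_to_use, ring->tail);

	return NETDEV_TX_OK;
}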
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 90
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 267992b3de8a..3195d82e4942 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2053,6 +2053,47 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @skb: send buffer
@@ -2195,8 +2236,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
@@ -2218,47 +2263,6 @@ dma_error:
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
@@ -2372,8 +2376,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 		    td_cmd, td_offset);
 
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop:
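For reference, the smp_mb() in __i40e_maybe_stop_tx() above pairs with the Tx completion path that restarts the queue. That counterpart is not part of this patch; a generic sketch of the handshake, again with hypothetical example_* names (example_reclaim_completed_descriptors(), example_desc_unused(), EXAMPLE_WAKE_THRESHOLD), might look like:

/* Completion side of the stop/restart handshake (sketch only).
 * Producer: stop the queue, barrier, re-check free descriptors.
 * Consumer: reclaim descriptors, barrier, check whether the queue
 * is stopped and wake it if enough room was freed. The paired
 * barriers keep the two re-checks from racing past each other.
 */
static void example_clean_tx(struct example_ring *ring)
{
	example_reclaim_completed_descriptors(ring);	/* hypothetical */

	smp_mb();	/* order reclaim vs. the stopped-queue check */
	if (unlikely(__netif_subqueue_stopped(ring->netdev,
					      ring->queue_index) &&
		     example_desc_unused(ring) >= EXAMPLE_WAKE_THRESHOLD))
		netif_wake_subqueue(ring->netdev, ring->queue_index);
}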