author	Niklas Cassel <niklas.cassel@axis.com>	2017-06-20 08:32:41 -0400
committer	David S. Miller <davem@davemloft.net>	2017-06-20 15:41:00 -0400
commit	05cf0d1bf4ed722aefff92775244dbe9e1bb4679 (patch)
tree	17f1e6c46f6da8e830f3e13ac1edd17d49246978
parent	57f0c9cf58ff7fe479137ab847a886d0eed3ad1d (diff)
net: stmmac: free an skb first when there are no longer any descriptors using it
When having the skb pointer in the first descriptor, stmmac_tx_clean
can get called at a moment where the IP has only cleared the own bit
of the first descriptor, thus freeing the skb, even though there can
be several descriptors whose buffers point into the same skb.

By simply moving the skb pointer from the first descriptor to the last
descriptor, a skb will get freed only when the IP has cleared the own
bit of all the descriptors that are using that skb.

Signed-off-by: Niklas Cassel <niklas.cassel@axis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
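To see why the placement matters, here is a minimal sketch of a cleanup
loop of the kind stmmac_tx_clean implements, written under stated
assumptions: desc_owned_by_hw() and the tx_queue/dma_desc layouts are
hypothetical stand-ins, while tx_skbuff, STMMAC_GET_ENTRY and
DMA_TX_SIZE come from the patch below.

/* Illustrative sketch only, not the driver's actual stmmac_tx_clean():
 * descriptors are reclaimed front to back, stopping at the first one
 * the DMA engine still owns.
 */
static void tx_clean_sketch(struct tx_queue *tx_q)
{
	while (tx_q->dirty_tx != tx_q->cur_tx) {
		unsigned int entry = tx_q->dirty_tx;
		struct dma_desc *p = tx_q->dma_tx + entry;

		/* Hypothetical helper: true while the own bit is set. */
		if (desc_owned_by_hw(p))
			break;

		/* The descriptor that carries the skb pointer decides
		 * when the skb is freed.  On the first descriptor, the
		 * free can happen while later descriptors still point
		 * into the skb's data; on the last descriptor, it can
		 * only happen after the hardware has released every
		 * descriptor belonging to that skb.
		 */
		if (tx_q->tx_skbuff[entry]) {
			dev_kfree_skb(tx_q->tx_skbuff[entry]);
			tx_q->tx_skbuff[entry] = NULL;
		}

		tx_q->dirty_tx = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
}

In other words, freeing on the first descriptor races with DMA reads
from the remaining fragments; deferring the skb pointer to the last
descriptor closes that window.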
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d16d11bfc046..6e4cbc6ce0ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2831,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[first_entry].buf = des;
 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
-	tx_q->tx_skbuff[first_entry] = skb;
 
 	first->des0 = cpu_to_le32(des);
 
@@ -2865,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
 
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
 
 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
@@ -2998,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	first = desc;
 
-	tx_q->tx_skbuff[first_entry] = skb;
-
 	enh_desc = priv->plat->enh_desc;
 	/* To program the descriptors according to the size of the frame */
 	if (enh_desc)
@@ -3047,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb->len);
 	}
 
-	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+	/* Only the last descriptor gets to point to the skb. */
+	tx_q->tx_skbuff[entry] = skb;
 
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and stmmac_tx_clean may clean up to this descriptor.
+	 */
+	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 	tx_q->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {