author		Stephen Hemminger <shemminger@osdl.org>	2006-12-01 19:36:22 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-12-02 00:24:50 -0500
commit		cabdfb373ae74036225826ce260c16a8e260eb0b (patch)
tree		3e0f126e924fd6ec95b9339ba4889fb668eb8e87 /drivers/net/chelsio
parent		56f643c28c5df63693d7c66e56f8e4767cfd7a65 (diff)
[PATCH] chelsio: transmit locking (plus bug fix).
If the transmit lock is contended, push the return code back to the
caller so the transmit can be retried at a higher level.

Bugfix: if the buffer was reallocated because of a lack of headroom and
the send is blocked, drop the packet. This is necessary because the
caller would otherwise requeue a freed skb.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
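The locking half of the change is the switch from spin_lock() to
spin_trylock() in t1_sge_tx(). A minimal sketch of that pattern is shown
below; the names (demo_txq, demo_hard_start_xmit) are placeholders, not
part of the chelsio driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_txq {			/* hypothetical per-queue TX state */
	spinlock_t lock;
	/* ... descriptor ring bookkeeping ... */
};

static int demo_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct demo_txq *q = netdev_priv(dev);

	/* Do not spin on a contended queue lock; report it instead. */
	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	/* ... post the skb to the hardware ring ... */

	spin_unlock(&q->lock);
	return NETDEV_TX_OK;
}

Returning NETDEV_TX_LOCKED hands the contended case back to the caller,
which requeues the skb and retries later, instead of the driver blocking
inside its transmit routine.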
Diffstat (limited to 'drivers/net/chelsio')
-rw-r--r--	drivers/net/chelsio/sge.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 9911048d8213..0ca8d876e16f 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1778,7 +1778,9 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-	spin_lock(&q->lock);
+	if (!spin_trylock(&q->lock))
+		return NETDEV_TX_LOCKED;
+
 	reclaim_completed_tx(sge, q);
 
 	pidx = q->pidx;
@@ -1887,6 +1889,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct sge *sge = adapter->sge;
 	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
 	struct cpl_tx_pkt *cpl;
+	struct sk_buff *orig_skb = skb;
+	int ret;
 
 	if (skb->protocol == htons(ETH_P_CPL5))
 		goto send;
@@ -1930,8 +1934,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * Complain when this happens but try to fix things up.
 	 */
 	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
-		struct sk_buff *orig_skb = skb;
-
 		pr_debug("%s: headroom %d header_len %d\n", dev->name,
 			 skb_headroom(skb), dev->hard_header_len);
 
@@ -1991,7 +1993,16 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 send:
 	st->tx_packets++;
 	dev->trans_start = jiffies;
-	return t1_sge_tx(skb, adapter, 0, dev);
+	ret = t1_sge_tx(skb, adapter, 0, dev);
+
+	/* If transmit busy, and we reallocated skb's due to headroom limit,
+	 * then silently discard to avoid leak.
+	 */
+	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+		dev_kfree_skb_any(skb);
+		ret = NETDEV_TX_OK;
+	}
+	return ret;
 }
 
 /*
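The bugfix half covers the interaction between the headroom fix-up and
the new trylock path: once t1_start_xmit() has replaced the caller's skb
with a reallocated copy, it can no longer report the transmit as busy,
because the caller would requeue the original skb, which has already
been freed. A rough sketch of that situation follows; demo_xmit() and
demo_queue_to_hw() are hypothetical stand-ins, not the real driver
functions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stand-in for t1_sge_tx(); pretend the queue lock was contended. */
static int demo_queue_to_hw(struct sk_buff *skb, struct net_device *dev)
{
	return NETDEV_TX_LOCKED;
}

static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb_headroom(skb) < dev->hard_header_len) {
		/* Not enough headroom: switch to a copy with more room. */
		skb = skb_realloc_headroom(skb, dev->hard_header_len);
		dev_kfree_skb_any(orig_skb);	/* original is gone from here on */
		if (!skb)
			return NETDEV_TX_OK;	/* copy failed, packet dropped */
	}

	ret = demo_queue_to_hw(skb, dev);

	/*
	 * If the hardware path was busy but we are holding a copy the
	 * caller never saw, we cannot ask for a retry: drop the copy
	 * and report success rather than let the caller requeue the
	 * freed original.
	 */
	if (ret != NETDEV_TX_OK && skb != orig_skb) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

Dropping the copy trades one lost packet for correctness; handing the
copy back is not an option because the caller only knows about the skb
it originally passed in.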