diff options
author | Neil Horman <nhorman@tuxdriver.com> | 2014-05-12 10:38:18 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-05-14 15:11:22 -0400 |
commit | c4b160685fc85e41fe8c08478cc61f4877d26973 (patch) | |
tree | 2f47eb3b894e440f007553682974656efb9a45ae | |
parent | 3a1cebe7e05027a1c96f2fc1a8eddf5f19b78f42 (diff) |
jme: Fix unmap loop counting error:
In my recent fix (76a691d0a: fix dma unmap warning), Ben Hutchings noted that my
loop count was incorrect. Where j started at startidx, it should have started
at zero, and gone on for count entries, not to endidx. Additionally, a DMA
resource exhaustion should drop the frame and (for now), return
NETDEV_TX_OK, not NETDEV_TX_BUSY. This patch fixes both of those issues:
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ben Hutchings <ben@decadent.org.uk>
CC: "David S. Miller" <davem@davemloft.net>
CC: Guo-Fu Tseng <cooldavid@cooldavid.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/jme.c | 10 |
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 6e664d9038d6..b78378cea5e3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -2027,14 +2027,14 @@ jme_fill_tx_map(struct pci_dev *pdev, | |||
2027 | return 0; | 2027 | return 0; |
2028 | } | 2028 | } |
2029 | 2029 | ||
2030 | static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx) | 2030 | static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) |
2031 | { | 2031 | { |
2032 | struct jme_ring *txring = &(jme->txring[0]); | 2032 | struct jme_ring *txring = &(jme->txring[0]); |
2033 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; | 2033 | struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; |
2034 | int mask = jme->tx_ring_mask; | 2034 | int mask = jme->tx_ring_mask; |
2035 | int j; | 2035 | int j; |
2036 | 2036 | ||
2037 | for (j = startidx ; j < endidx ; ++j) { | 2037 | for (j = 0 ; j < count ; j++) { |
2038 | ctxbi = txbi + ((startidx + j + 2) & (mask)); | 2038 | ctxbi = txbi + ((startidx + j + 2) & (mask)); |
2039 | pci_unmap_page(jme->pdev, | 2039 | pci_unmap_page(jme->pdev, |
2040 | ctxbi->mapping, | 2040 | ctxbi->mapping, |
@@ -2069,7 +2069,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) | |||
2069 | skb_frag_page(frag), | 2069 | skb_frag_page(frag), |
2070 | frag->page_offset, skb_frag_size(frag), hidma); | 2070 | frag->page_offset, skb_frag_size(frag), hidma); |
2071 | if (ret) { | 2071 | if (ret) { |
2072 | jme_drop_tx_map(jme, idx, idx+i); | 2072 | jme_drop_tx_map(jme, idx, i); |
2073 | goto out; | 2073 | goto out; |
2074 | } | 2074 | } |
2075 | 2075 | ||
@@ -2081,7 +2081,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) | |||
2081 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), | 2081 | ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), |
2082 | offset_in_page(skb->data), len, hidma); | 2082 | offset_in_page(skb->data), len, hidma); |
2083 | if (ret) | 2083 | if (ret) |
2084 | jme_drop_tx_map(jme, idx, idx+i); | 2084 | jme_drop_tx_map(jme, idx, i); |
2085 | 2085 | ||
2086 | out: | 2086 | out: |
2087 | return ret; | 2087 | return ret; |
@@ -2269,7 +2269,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2269 | } | 2269 | } |
2270 | 2270 | ||
2271 | if (jme_fill_tx_desc(jme, skb, idx)) | 2271 | if (jme_fill_tx_desc(jme, skb, idx)) |
2272 | return NETDEV_TX_BUSY; | 2272 | return NETDEV_TX_OK; |
2273 | 2273 | ||
2274 | jwrite32(jme, JME_TXCS, jme->reg_txcs | | 2274 | jwrite32(jme, JME_TXCS, jme->reg_txcs | |
2275 | TXCS_SELECT_QUEUE0 | | 2275 | TXCS_SELECT_QUEUE0 | |