author     Neil Horman <nhorman@tuxdriver.com>    2014-05-05 14:51:47 -0400
committer  David S. Miller <davem@davemloft.net>  2014-05-07 15:54:14 -0400
commit     76a691d0ab71a244f7582a5b0387728befbdb52f (patch)
tree       242054eac68746967068475bb81a2ce890d6d9dd
parent     d32aebfd64c8c8649b39cd6789b141525cc9d7c0 (diff)
jme: Fix DMA unmap warning
The jme driver forgot to check the return status from pci_map_page in its TX
path, causing a DMA API warning on unmap. Easy fix: do the check, and augment
the TX path to tell the stack that the driver is busy so the frame gets
re-queued.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Guo-Fu Tseng <cooldavid@cooldavid.org>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
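
For context, the pattern the patch introduces is the standard DMA-API one:
every pci_map_page() must be checked with pci_dma_mapping_error() before the
handle is used, and on a partial failure every mapping made so far must be
unwound before the frame is handed back to the stack. The sketch below is
illustrative only, using the legacy pci_* DMA wrappers the driver used at the
time; struct my_buf and the two function names are hypothetical stand-ins,
not symbols from jme.c.

#include <linux/pci.h>

struct my_buf {				/* hypothetical per-slot bookkeeping */
	dma_addr_t mapping;
	u32 len;
};

static int my_map_tx_frag(struct pci_dev *pdev, struct my_buf *buf,
			  struct page *page, u32 offset, u32 len)
{
	dma_addr_t dmaaddr = pci_map_page(pdev, page, offset, len,
					  PCI_DMA_TODEVICE);

	/*
	 * pci_map_page() can fail (e.g. IOMMU exhaustion); passing the
	 * bogus handle to pci_unmap_page() later is what trips the
	 * DMA API debug warning this patch fixes.
	 */
	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
		return -EINVAL;

	buf->mapping = dmaaddr;
	buf->len = len;
	return 0;
}

static void my_drop_tx_maps(struct pci_dev *pdev, struct my_buf *bufs,
			    int start, int end)
{
	int i;

	/* Unwind only the slots that were successfully mapped. */
	for (i = start; i < end; ++i) {
		pci_unmap_page(pdev, bufs[i].mapping, bufs[i].len,
			       PCI_DMA_TODEVICE);
		bufs[i].mapping = 0;
		bufs[i].len = 0;
	}
}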
-rw-r--r--  drivers/net/ethernet/jme.c  53
1 file changed, 47 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..6e664d9038d6 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 	return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
 		struct txdesc *txdesc,
 		struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
 				len,
 				PCI_DMA_TODEVICE);
 
+	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+		return -EINVAL;
+
 	pci_dma_sync_single_for_device(pdev,
 				       dmaaddr,
 				       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
 	txbi->mapping = dmaaddr;
 	txbi->len = len;
+	return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	int mask = jme->tx_ring_mask;
+	int j;
+
+	for (j = startidx ; j < endidx ; ++j) {
+		ctxbi = txbi + ((startidx + j + 2) & (mask));
+		pci_unmap_page(jme->pdev,
+				ctxbi->mapping,
+				ctxbi->len,
+				PCI_DMA_TODEVICE);
+
+		ctxbi->mapping = 0;
+		ctxbi->len = 0;
+	}
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
 	u32 len;
+	int ret = 0;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
 		frag = &skb_shinfo(skb)->frags[i];
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
 				skb_frag_page(frag),
 				frag->page_offset, skb_frag_size(frag), hidma);
+		if (ret) {
+			jme_drop_tx_map(jme, idx, idx+i);
+			goto out;
+		}
+
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 	ctxdesc = txdesc + ((idx + 1) & (mask));
 	ctxbi = txbi + ((idx + 1) & (mask));
-	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
 			offset_in_page(skb->data), len, hidma);
+	if (ret)
+		jme_drop_tx_map(jme, idx, idx+i);
+
+out:
+	return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
+	int ret = 0;
 
 	txdesc = (struct txdesc *)txring->desc + idx;
 	txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
 		jme_tx_csum(jme, skb, &flags);
 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-	jme_map_tx_skb(jme, skb, idx);
+	ret = jme_map_tx_skb(jme, skb, idx);
+	if (ret)
+		return ret;
+
 	txdesc->desc1.flags = flags;
 	/*
 	 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	jme_fill_tx_desc(jme, skb, idx);
+	if (jme_fill_tx_desc(jme, skb, idx))
+		return NETDEV_TX_BUSY;
 
 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 		 TXCS_SELECT_QUEUE0 |
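
A note on the error propagation the diff sets up: jme_fill_tx_desc() now
returns jme_map_tx_skb()'s error, and jme_start_xmit() converts any failure
into NETDEV_TX_BUSY, which tells the core to re-queue the skb and retry
rather than drop it. A minimal, hypothetical sketch of that contract follows;
my_priv and my_map_and_queue are illustrative stand-ins, not jme symbols.

#include <linux/netdevice.h>

struct my_priv;					/* hypothetical driver state */
int my_map_and_queue(struct my_priv *priv, struct sk_buff *skb);

static netdev_tx_t my_start_xmit(struct sk_buff *skb,
				 struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);

	/*
	 * On NETDEV_TX_BUSY the stack keeps ownership of the skb and
	 * re-queues it, so the driver must not free it and must leave
	 * the ring clean: hence the unmap/unwind that jme_drop_tx_map()
	 * performs before the error reaches this point.
	 */
	if (my_map_and_queue(priv, skb))
		return NETDEV_TX_BUSY;

	return NETDEV_TX_OK;
}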