aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/moxa/moxart_ether.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-02-01 18:56:08 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-02-01 18:56:08 -0500
commit34229b277480f46c1e9a19f027f30b074512e68b (patch)
tree90d8b43ebceb850b0e7852d75283aebbd2abbc00 /drivers/net/ethernet/moxa/moxart_ether.c
parent2c923414d3963b959f65a8a6031972402e6a34a5 (diff)
parent53729eb174c1589f9185340ffe8c10b3f39f3ef3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: "This looks like a lot but it's a mixture of regression fixes as well as fixes for longer standing issues. 1) Fix on-channel cancellation in mac80211, from Johannes Berg. 2) Handle CHECKSUM_COMPLETE properly in xt_TCPMSS netfilter xtables module, from Eric Dumazet. 3) Avoid infinite loop in UDP SO_REUSEPORT logic, also from Eric Dumazet. 4) Avoid a NULL deref if we try to set SO_REUSEPORT after a socket is bound, from Craig Gallek. 5) GRO key comparisons don't take lightweight tunnels into account, from Jesse Gross. 6) Fix struct pid leak via SCM credentials in AF_UNIX, from Eric Dumazet. 7) We need to set the rtnl_link_ops of ipv6 SIT tunnels before we register them, otherwise the NEWLINK netlink message is missing the proper attributes. From Thadeu Lima de Souza Cascardo. 8) Several Spectrum chip bug fixes for mlxsw switch driver, from Ido Schimmel 9) Handle fragments properly in ipv4 easly socket demux, from Eric Dumazet. 10) Don't ignore the ifindex key specifier on ipv6 output route lookups, from Paolo Abeni" * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (128 commits) tcp: avoid cwnd undo after receiving ECN irda: fix a potential use-after-free in ircomm_param_request net: tg3: avoid uninitialized variable warning net: nb8800: avoid uninitialized variable warning net: vxge: avoid unused function warnings net: bgmac: clarify CONFIG_BCMA dependency net: hp100: remove unnecessary #ifdefs net: davinci_cpdma: use dma_addr_t for DMA address ipv6/udp: use sticky pktinfo egress ifindex on connect() ipv6: enforce flowi6_oif usage in ip6_dst_lookup_tail() netlink: not trim skb for mmaped socket when dump vxlan: fix a out of bounds access in __vxlan_find_mac net: dsa: mv88e6xxx: fix port VLAN maps fib_trie: Fix shift by 32 in fib_table_lookup net: moxart: use correct accessors for DMA memory ipv4: ipconfig: avoid unused ic_proto_used symbol bnxt_en: Fix crash in bnxt_free_tx_skbs() during tx timeout. 
bnxt_en: Exclude rx_drop_pkts hw counter from the stack's rx_dropped counter. bnxt_en: Ring free response from close path should use completion ring net_sched: drr: check for NULL pointer in drr_dequeue ...
Diffstat (limited to 'drivers/net/ethernet/moxa/moxart_ether.c')
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index a10c928bbd6b..00cfd95ca59d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -28,6 +28,16 @@
28 28
29#include "moxart_ether.h" 29#include "moxart_ether.h"
30 30
31static inline void moxart_desc_write(u32 data, u32 *desc)
32{
33 *desc = cpu_to_le32(data);
34}
35
36static inline u32 moxart_desc_read(u32 *desc)
37{
38 return le32_to_cpu(*desc);
39}
40
31static inline void moxart_emac_write(struct net_device *ndev, 41static inline void moxart_emac_write(struct net_device *ndev,
32 unsigned int reg, unsigned long value) 42 unsigned int reg, unsigned long value)
33{ 43{
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
112static void moxart_mac_setup_desc_ring(struct net_device *ndev) 122static void moxart_mac_setup_desc_ring(struct net_device *ndev)
113{ 123{
114 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 124 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
115 void __iomem *desc; 125 void *desc;
116 int i; 126 int i;
117 127
118 for (i = 0; i < TX_DESC_NUM; i++) { 128 for (i = 0; i < TX_DESC_NUM; i++) {
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
121 131
122 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; 132 priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
123 } 133 }
124 writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); 134 moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
125 135
126 priv->tx_head = 0; 136 priv->tx_head = 0;
127 priv->tx_tail = 0; 137 priv->tx_tail = 0;
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
129 for (i = 0; i < RX_DESC_NUM; i++) { 139 for (i = 0; i < RX_DESC_NUM; i++) {
130 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; 140 desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
131 memset(desc, 0, RX_REG_DESC_SIZE); 141 memset(desc, 0, RX_REG_DESC_SIZE);
132 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); 142 moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
133 writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, 143 moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
134 desc + RX_REG_OFFSET_DESC1); 144 desc + RX_REG_OFFSET_DESC1);
135 145
136 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; 146 priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
141 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) 151 if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
142 netdev_err(ndev, "DMA mapping error\n"); 152 netdev_err(ndev, "DMA mapping error\n");
143 153
144 writel(priv->rx_mapping[i], 154 moxart_desc_write(priv->rx_mapping[i],
145 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); 155 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
146 writel(priv->rx_buf[i], 156 moxart_desc_write((uintptr_t)priv->rx_buf[i],
147 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); 157 desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
148 } 158 }
149 writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); 159 moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
150 160
151 priv->rx_head = 0; 161 priv->rx_head = 0;
152 162
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
201 napi); 211 napi);
202 struct net_device *ndev = priv->ndev; 212 struct net_device *ndev = priv->ndev;
203 struct sk_buff *skb; 213 struct sk_buff *skb;
204 void __iomem *desc; 214 void *desc;
205 unsigned int desc0, len; 215 unsigned int desc0, len;
206 int rx_head = priv->rx_head; 216 int rx_head = priv->rx_head;
207 int rx = 0; 217 int rx = 0;
208 218
209 while (rx < budget) { 219 while (rx < budget) {
210 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); 220 desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
211 desc0 = readl(desc + RX_REG_OFFSET_DESC0); 221 desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
222 rmb(); /* ensure desc0 is up to date */
212 223
213 if (desc0 & RX_DESC0_DMA_OWN) 224 if (desc0 & RX_DESC0_DMA_OWN)
214 break; 225 break;
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
250 priv->stats.multicast++; 261 priv->stats.multicast++;
251 262
252rx_next: 263rx_next:
253 writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); 264 wmb(); /* prevent setting ownership back too early */
265 moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
254 266
255 rx_head = RX_NEXT(rx_head); 267 rx_head = RX_NEXT(rx_head);
256 priv->rx_head = rx_head; 268 priv->rx_head = rx_head;
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
310static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) 322static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
311{ 323{
312 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 324 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
313 void __iomem *desc; 325 void *desc;
314 unsigned int len; 326 unsigned int len;
315 unsigned int tx_head = priv->tx_head; 327 unsigned int tx_head = priv->tx_head;
316 u32 txdes1; 328 u32 txdes1;
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
319 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); 331 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
320 332
321 spin_lock_irq(&priv->txlock); 333 spin_lock_irq(&priv->txlock);
322 if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { 334 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
323 net_dbg_ratelimited("no TX space for packet\n"); 335 net_dbg_ratelimited("no TX space for packet\n");
324 priv->stats.tx_dropped++; 336 priv->stats.tx_dropped++;
325 goto out_unlock; 337 goto out_unlock;
326 } 338 }
339 rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
327 340
328 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; 341 len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
329 342
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
337 priv->tx_len[tx_head] = len; 350 priv->tx_len[tx_head] = len;
338 priv->tx_skb[tx_head] = skb; 351 priv->tx_skb[tx_head] = skb;
339 352
340 writel(priv->tx_mapping[tx_head], 353 moxart_desc_write(priv->tx_mapping[tx_head],
341 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); 354 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
342 writel(skb->data, 355 moxart_desc_write((uintptr_t)skb->data,
343 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); 356 desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
344 357
345 if (skb->len < ETH_ZLEN) { 358 if (skb->len < ETH_ZLEN) {
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
354 txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); 367 txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
355 if (tx_head == TX_DESC_NUM_MASK) 368 if (tx_head == TX_DESC_NUM_MASK)
356 txdes1 |= TX_DESC1_END; 369 txdes1 |= TX_DESC1_END;
357 writel(txdes1, desc + TX_REG_OFFSET_DESC1); 370 moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
358 writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); 371 wmb(); /* flush descriptor before transferring ownership */
372 moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
359 373
360 /* start to send packet */ 374 /* start to send packet */
361 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); 375 writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);