Diffstat (limited to 'drivers/net/smc911x.c')
 drivers/net/smc911x.c | 38 +++++++++++++++++---------------------
 1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 8aa7460ef0e3..f59c7772f344 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -155,23 +155,17 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {			\
 	unsigned int __mask;				\
-	unsigned long __flags;				\
-	spin_lock_irqsave(&lp->lock, __flags);		\
 	__mask = SMC_GET_INT_EN((lp));			\
 	__mask |= (x);					\
 	SMC_SET_INT_EN((lp), __mask);			\
-	spin_unlock_irqrestore(&lp->lock, __flags);	\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {			\
 	unsigned int __mask;				\
-	unsigned long __flags;				\
-	spin_lock_irqsave(&lp->lock, __flags);		\
 	__mask = SMC_GET_INT_EN((lp));			\
 	__mask &= ~(x);					\
 	SMC_SET_INT_EN((lp), __mask);			\
-	spin_unlock_irqrestore(&lp->lock, __flags);	\
 } while (0)
 
 /*
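Note: with the locking dropped from SMC_ENABLE_INT()/SMC_DISABLE_INT(), the macros now do an unprotected read-modify-write of the interrupt enable register, so callers are assumed to hold lp->lock themselves. A minimal sketch of that calling convention follows; the helper name is hypothetical, not part of smc911x.c:

/* Sketch only: the caller, not the macro, now takes lp->lock.
 * enable_tx_avail_irq() is a made-up helper for illustration. */
static void enable_tx_avail_irq(struct smc911x_local *lp)
{
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_);	/* read-modify-write of INT_EN */
	spin_unlock_irqrestore(&lp->lock, flags);
}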
@@ -180,7 +174,7 @@ static void PRINT_PKT(u_char *buf, int length)
 static void smc911x_reset(struct net_device *dev)
 {
 	struct smc911x_local *lp = netdev_priv(dev);
-	unsigned int reg, timeout=0, resets=1;
+	unsigned int reg, timeout=0, resets=1, irq_cfg;
 	unsigned long flags;
 
 	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
@@ -252,7 +246,12 @@ static void smc911x_reset(struct net_device *dev)
 	 * Deassert IRQ for 1*10us for edge type interrupts
 	 * and drive IRQ pin push-pull
 	 */
-	SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_);
+	irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+	if (lp->cfg.irq_polarity)
+		irq_cfg |= INT_CFG_IRQ_POL_;
+#endif
+	SMC_SET_IRQ_CFG(lp, irq_cfg);
 
 	/* clear anything saved */
 	if (lp->pending_tx_skb != NULL) {
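The new irq_polarity handling only takes effect when SMC_DYNAMIC_BUS_CONFIG is defined, in which case lp->cfg is filled from platform data. A hedged sketch of a board file requesting active-high IRQ polarity; the device layout, header path and flag value are illustrative assumptions, not taken from this patch, and the memory/IRQ resources are omitted:

/* Illustrative only: platform data asking for active-high IRQ polarity. */
#include <linux/platform_device.h>
#include <linux/smc911x.h>

static struct smc911x_platdata board_smc911x_pdata = {
	.flags        = SMC911X_USE_32BIT,	/* example bus-width flag */
	.irq_polarity = 1,			/* sets INT_CFG_IRQ_POL_ in smc911x_reset() */
};

static struct platform_device board_smc911x_device = {
	.name              = "smc911x",
	.id                = -1,
	.dev.platform_data = &board_smc911x_pdata,
};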
@@ -274,6 +273,8 @@ static void smc911x_enable(struct net_device *dev)
 
 	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
 
+	spin_lock_irqsave(&lp->lock, flags);
+
 	SMC_SET_MAC_ADDR(lp, dev->dev_addr);
 
 	/* Enable TX */
@@ -286,12 +287,10 @@ static void smc911x_enable(struct net_device *dev)
 	SMC_SET_FIFO_TSL(lp, 64);
 	SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
 
-	spin_lock_irqsave(&lp->lock, flags);
 	SMC_GET_MAC_CR(lp, cr);
 	cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
 	SMC_SET_MAC_CR(lp, cr);
 	SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
-	spin_unlock_irqrestore(&lp->lock, flags);
 
 	/* Add 2 byte padding to start of packets */
 	SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
@@ -300,9 +299,7 @@ static void smc911x_enable(struct net_device *dev)
 	if (cr & MAC_CR_RXEN_)
 		DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
 
-	spin_lock_irqsave(&lp->lock, flags);
 	SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
-	spin_unlock_irqrestore(&lp->lock, flags);
 
 	/* Interrupt on every received packet */
 	SMC_SET_FIFO_RSA(lp, 0x01);
@@ -318,6 +315,8 @@ static void smc911x_enable(struct net_device *dev)
 		mask|=INT_EN_RDFO_EN_;
 	}
 	SMC_ENABLE_INT(lp, mask);
+
+	spin_unlock_irqrestore(&lp->lock, flags);
 }
 
 /*
@@ -458,7 +457,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
 	struct sk_buff *skb;
 	unsigned int cmdA, cmdB, len;
 	unsigned char *buf;
-	unsigned long flags;
 
 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
 	BUG_ON(lp->pending_tx_skb == NULL);
@@ -503,11 +501,9 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
 	dev->trans_start = jiffies;
 	dev_kfree_skb(skb);
 #endif
-	spin_lock_irqsave(&lp->lock, flags);
 	if (!lp->tx_throttle) {
 		netif_wake_queue(dev);
 	}
-	spin_unlock_irqrestore(&lp->lock, flags);
 	SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
 }
 
@@ -526,6 +522,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
 		dev->name, __func__);
 
+	spin_lock_irqsave(&lp->lock, flags);
+
 	BUG_ON(lp->pending_tx_skb != NULL);
 
 	free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
@@ -535,12 +533,10 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
 		DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
 			dev->name, free);
-		spin_lock_irqsave(&lp->lock, flags);
 		/* Reenable when at least 1 packet of size MTU present */
 		SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
 		lp->tx_throttle = 1;
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
 	/* Drop packets when we run out of space in TX FIFO
@@ -556,6 +552,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		lp->pending_tx_skb = NULL;
 		dev->stats.tx_errors++;
 		dev->stats.tx_dropped++;
+		spin_unlock_irqrestore(&lp->lock, flags);
 		dev_kfree_skb(skb);
 		return 0;
 	}
@@ -565,7 +562,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* If the DMA is already running then defer this packet Tx until
 	 * the DMA IRQ starts it
 	 */
-	spin_lock_irqsave(&lp->lock, flags);
 	if (lp->txdma_active) {
 		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
 		lp->pending_tx_skb = skb;
@@ -576,11 +572,11 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
 		lp->txdma_active = 1;
 	}
-	spin_unlock_irqrestore(&lp->lock, flags);
 	}
 #endif
 	lp->pending_tx_skb = skb;
 	smc911x_hardware_send_pkt(dev);
+	spin_unlock_irqrestore(&lp->lock, flags);
 
 	return 0;
 }
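Taken together, the smc911x_hard_start_xmit() hunks move to a single lock acquisition at the top of the function, so every exit path has to drop lp->lock and smc911x_hardware_send_pkt() now runs with the lock held. An abridged sketch of the resulting shape; the function name is made up and the FIFO space checks are elided:

/* Abridged shape of the xmit path after this patch; not the full function. */
static int xmit_shape(struct sk_buff *skb, struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	if (/* TX FIFO too full for this skb (checks elided) */ 0) {
		/* error path must unlock before returning */
		spin_unlock_irqrestore(&lp->lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	lp->pending_tx_skb = skb;
	smc911x_hardware_send_pkt(dev);	/* now called with lp->lock held */

	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}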
@@ -1242,7 +1238,7 @@ smc911x_rx_dma_irq(int dma, void *data)
 	netif_rx(skb);
 
 	spin_lock_irqsave(&lp->lock, flags);
-	pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
+	pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
 	if (pkts != 0) {
 		smc911x_rcv(dev);
 	}else {
@@ -2054,7 +2050,7 @@ err_out:
  */
 static int smc911x_drv_probe(struct platform_device *pdev)
 {
-	struct smc91x_platdata *pd = pdev->dev.platform_data;
+	struct smc911x_platdata *pd = pdev->dev.platform_data;
 	struct net_device *ndev;
 	struct resource *res;
 	struct smc911x_local *lp;