author	Sebastian Siewior <bigeasy@linutronix.de>	2008-05-01 00:08:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-05-01 11:08:36 -0400
commit	3b2b74cad34e7a0cf6d4929ee9e8ad4e11a84867 (patch)
tree	363055bb8fb9dcdf04aac3191f6258c798b9a43f /drivers/net
parent	c1d9615680c3f43a305b1f66dff0f933d5079273 (diff)
m68knommu: fix FEC driver locking
It's easy: grab locks before talking to hardware and release them
afterwards. The one big lock has been split into a hw_lock and a mii_lock.

Signed-off-by: Sebastian Siewior <bigeasy@linutronix.de>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
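The scheme used throughout the patch is the usual split-lock pattern: each spinlock protects one well-defined piece of state, is taken with the irqsave variants from paths reachable from process context (fec_enet_start_xmit(), mii_queue()), and with the plain _irq variants in the helpers only called from the interrupt handler (fec_enet_tx(), fec_enet_rx(), fec_enet_mii()). Below is a minimal sketch of that pattern, not the actual fec.c code; demo_priv, demo_xmit and demo_init are hypothetical names used only for illustration.

#include <linux/spinlock.h>

struct demo_priv {			/* hypothetical private struct */
	spinlock_t hw_lock;		/* guards tx/rx rings and controller registers */
	spinlock_t mii_lock;		/* guards the MII request list */
};

static int demo_xmit(struct demo_priv *priv)
{
	unsigned long flags;

	/* Process context can race with the interrupt handler,
	 * so save and disable local interrupts while holding the lock.
	 */
	spin_lock_irqsave(&priv->hw_lock, flags);
	/* ... queue the frame on the tx ring and kick the hardware ... */
	spin_unlock_irqrestore(&priv->hw_lock, flags);
	return 0;
}

static void demo_init(struct demo_priv *priv)
{
	/* Both locks must be initialised before the first interrupt fires. */
	spin_lock_init(&priv->hw_lock);
	spin_lock_init(&priv->mii_lock);
}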
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/fec.c	55
1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 5f9c2c1a9d40..32a4f17d35fc 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -209,7 +209,10 @@ struct fec_enet_private {
 	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
 	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
 	uint	tx_full;
-	spinlock_t lock;
+	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+	spinlock_t hw_lock;
+	/* hold while accessing the mii_list_t() elements */
+	spinlock_t mii_lock;
 
 	uint	phy_id;
 	uint	phy_id_done;
@@ -313,6 +316,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	volatile fec_t	*fecp;
 	volatile cbd_t	*bdp;
 	unsigned short	status;
+	unsigned long flags;
 
 	fep = netdev_priv(dev);
 	fecp = (volatile fec_t*)dev->base_addr;
@@ -322,6 +326,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 1;
 	}
 
+	spin_lock_irqsave(&fep->hw_lock, flags);
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;
 
@@ -332,6 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * This should not happen, since dev->tbusy should be set.
 		 */
 		printk("%s: tx queue full!.\n", dev->name);
+		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return 1;
 	}
 #endif
@@ -370,8 +376,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	flush_dcache_range((unsigned long)skb->data,
 			   (unsigned long)skb->data + skb->len);
 
-	spin_lock_irq(&fep->lock);
-
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
@@ -400,7 +404,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	fep->cur_tx = (cbd_t *)bdp;
 
-	spin_unlock_irq(&fep->lock);
+	spin_unlock_irqrestore(&fep->hw_lock, flags);
 
 	return 0;
 }
@@ -458,19 +462,20 @@ fec_enet_interrupt(int irq, void * dev_id)
 	struct	net_device *dev = dev_id;
 	volatile fec_t	*fecp;
 	uint	int_events;
-	int handled = 0;
+	irqreturn_t ret = IRQ_NONE;
 
 	fecp = (volatile fec_t*)dev->base_addr;
 
 	/* Get the interrupt events that caused us to be here.
 	 */
-	while ((int_events = fecp->fec_ievent) != 0) {
+	do {
+		int_events = fecp->fec_ievent;
 		fecp->fec_ievent = int_events;
 
 		/* Handle receive event in its own function.
 		 */
 		if (int_events & FEC_ENET_RXF) {
-			handled = 1;
+			ret = IRQ_HANDLED;
 			fec_enet_rx(dev);
 		}
 
@@ -479,17 +484,18 @@ fec_enet_interrupt(int irq, void * dev_id)
 		   them as part of the transmit process.
 		*/
 		if (int_events & FEC_ENET_TXF) {
-			handled = 1;
+			ret = IRQ_HANDLED;
 			fec_enet_tx(dev);
 		}
 
 		if (int_events & FEC_ENET_MII) {
-			handled = 1;
+			ret = IRQ_HANDLED;
 			fec_enet_mii(dev);
 		}
 
-	}
-	return IRQ_RETVAL(handled);
+	} while (int_events);
+
+	return ret;
 }
 
 
@@ -502,7 +508,7 @@ fec_enet_tx(struct net_device *dev)
 	struct sk_buff	*skb;
 
 	fep = netdev_priv(dev);
-	spin_lock(&fep->lock);
+	spin_lock_irq(&fep->hw_lock);
 	bdp = fep->dirty_tx;
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -561,7 +567,7 @@ fec_enet_tx(struct net_device *dev)
 		}
 	}
 	fep->dirty_tx = (cbd_t *)bdp;
-	spin_unlock(&fep->lock);
+	spin_unlock_irq(&fep->hw_lock);
 }
 
 
@@ -588,6 +594,8 @@ fec_enet_rx(struct net_device *dev)
 	fep = netdev_priv(dev);
 	fecp = (volatile fec_t*)dev->base_addr;
 
+	spin_lock_irq(&fep->hw_lock);
+
 	/* First, grab all of the stats for the incoming packet.
 	 * These get messed up if we get called due to a busy condition.
 	 */
@@ -693,6 +701,8 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
 	 */
 	fecp->fec_r_des_active = 0;
 #endif
+
+	spin_unlock_irq(&fep->hw_lock);
 }
 
 
@@ -706,11 +716,11 @@ fec_enet_mii(struct net_device *dev)
 	uint	mii_reg;
 
 	fep = netdev_priv(dev);
+	spin_lock_irq(&fep->mii_lock);
+
 	ep = fep->hwp;
 	mii_reg = ep->fec_mii_data;
 
-	spin_lock(&fep->lock);
-
 	if ((mip = mii_head) == NULL) {
 		printk("MII and no head!\n");
 		goto unlock;
@@ -727,7 +737,7 @@ fec_enet_mii(struct net_device *dev)
 	ep->fec_mii_data = mip->mii_regval;
 
 unlock:
-	spin_unlock(&fep->lock);
+	spin_unlock_irq(&fep->mii_lock);
 }
 
 static int
@@ -741,12 +751,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
 	/* Add PHY address to register command.
 	*/
 	fep = netdev_priv(dev);
-	regval |= fep->phy_addr << 23;
+	spin_lock_irqsave(&fep->mii_lock, flags);
 
+	regval |= fep->phy_addr << 23;
 	retval = 0;
 
-	spin_lock_irqsave(&fep->lock,flags);
-
 	if ((mip = mii_free) != NULL) {
 		mii_free = mip->mii_next;
 		mip->mii_regval = regval;
@@ -763,9 +772,8 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
 		retval = 1;
 	}
 
-	spin_unlock_irqrestore(&fep->lock,flags);
-
-	return(retval);
+	spin_unlock_irqrestore(&fep->mii_lock, flags);
+	return retval;
 }
 
 static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
@@ -2308,6 +2316,9 @@ int __init fec_enet_init(struct net_device *dev)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&fep->hw_lock);
+	spin_lock_init(&fep->mii_lock);
+
 	/* Create an Ethernet device instance.
 	*/
 	fecp = (volatile fec_t *) fec_hw[index];