author	Andy Fleming <afleming@freescale.com>	2006-04-20 17:44:29 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-04-20 17:55:06 -0400
commit	fef6108d4556917c45cd9ba397c1c7597f3990e1 (patch)
tree	f35566dd3ddbda7cc84fc8a03aa3aebeea7dc746
parent	f18b95c3e2ab0f75b23a5aabab0bc8f99bd6bbf3 (diff)
[PATCH] Fix locking in gianfar
This patch fixes several bugs in the gianfar driver, including a major one
where spinlocks were horribly broken:

* Split gianfar locks into two types: TX and RX
* Made it so gfar_start() now clears RHALT
* Fixed a bug where calling gfar_start_xmit() with interrupts off would
  corrupt the interrupt state
* Fixed a bug where a frame could potentially arrive, and never be handled
  (if no more frames arrived)
* Fixed a bug where the rx_work_limit would never be observed by the rx
  completion code
* Fixed a bug where the interrupt handlers were not actually protected by
  their spinlocks

Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/net/gianfar.c	56
-rw-r--r--	drivers/net/gianfar.h	67
-rw-r--r--	drivers/net/gianfar_ethtool.c	20
-rw-r--r--	drivers/net/gianfar_sysfs.c	24
4 files changed, 100 insertions(+), 67 deletions(-)
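Note on the locking scheme: the patch splits the single priv->lock into priv->txlock and priv->rxlock, and the paths that must quiesce both directions (stop_gfar(), the ring-resize and rx-csum ethtool paths) always take the TX lock first, then the RX lock, releasing them in reverse order, so the split cannot introduce a lock-ordering deadlock while letting the TX and RX fast paths run concurrently. Below is a minimal, hedged userspace sketch of that discipline; it uses POSIX mutexes as a stand-in for the kernel's spin_lock_irqsave()/spin_lock() pair, and the struct and function names (fake_priv, xmit_path, halt_both) are illustrative only, not part of the driver.

	/* Illustrative analogy only: the txlock/rxlock split and the
	 * tx-then-rx acquisition order used by this patch, modelled with
	 * pthread mutexes instead of kernel spinlocks. */
	#include <pthread.h>
	#include <stdio.h>

	struct fake_priv {
		pthread_mutex_t txlock;   /* protects TX ring state */
		pthread_mutex_t rxlock;   /* protects RX ring state */
		int tx_halted;
		int rx_halted;
	};

	/* TX-only path (cf. gfar_start_xmit): only the TX lock is taken. */
	static void xmit_path(struct fake_priv *p)
	{
		pthread_mutex_lock(&p->txlock);
		p->tx_halted = 0;
		pthread_mutex_unlock(&p->txlock);
	}

	/* Full-stop path (cf. stop_gfar): take txlock, then rxlock,
	 * and release in the opposite order to avoid deadlock. */
	static void halt_both(struct fake_priv *p)
	{
		pthread_mutex_lock(&p->txlock);
		pthread_mutex_lock(&p->rxlock);

		p->tx_halted = 1;
		p->rx_halted = 1;

		pthread_mutex_unlock(&p->rxlock);
		pthread_mutex_unlock(&p->txlock);
	}

	int main(void)
	{
		struct fake_priv p = {
			.txlock = PTHREAD_MUTEX_INITIALIZER,
			.rxlock = PTHREAD_MUTEX_INITIALIZER,
		};

		xmit_path(&p);
		halt_both(&p);
		printf("tx_halted=%d rx_halted=%d\n", p.tx_halted, p.rx_halted);
		return 0;
	}

In the driver itself the outer lock is taken with spin_lock_irqsave() and the inner one with plain spin_lock(), since interrupts are already disabled at that point; the userspace sketch cannot model the irq-flags aspect.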
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 771e25d8c417..218d31764c52 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -210,7 +210,8 @@ static int gfar_probe(struct platform_device *pdev)
 		goto regs_fail;
 	}
 
-	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->txlock);
+	spin_lock_init(&priv->rxlock);
 
 	platform_set_drvdata(pdev, dev);
 
@@ -515,11 +516,13 @@ void stop_gfar(struct net_device *dev)
 	phy_stop(priv->phydev);
 
 	/* Lock it down */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
 
 	gfar_halt(dev);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Free the IRQs */
 	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -605,14 +608,15 @@ void gfar_start(struct net_device *dev)
 	tempval |= DMACTRL_INIT_SETTINGS;
 	gfar_write(&priv->regs->dmactrl, tempval);
 
-	/* Clear THLT, so that the DMA starts polling now */
-	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
-
 	/* Make sure we aren't stopped */
 	tempval = gfar_read(&priv->regs->dmactrl);
 	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
 	gfar_write(&priv->regs->dmactrl, tempval);
 
+	/* Clear THLT/RHLT, so that the DMA starts polling now */
+	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
+	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);
+
 	/* Unmask the interrupts we look for */
 	gfar_write(&regs->imask, IMASK_DEFAULT);
 }
@@ -928,12 +932,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct txfcb *fcb = NULL;
 	struct txbd8 *txbdp;
 	u16 status;
+	unsigned long flags;
 
 	/* Update transmit stats */
 	priv->stats.tx_bytes += skb->len;
 
 	/* Lock priv now */
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	/* Point at the first free tx descriptor */
 	txbdp = priv->cur_tx;
@@ -1004,7 +1009,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
 	/* Unlock priv */
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return 0;
 }
@@ -1049,7 +1054,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 	unsigned long flags;
 	u32 tempval;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	priv->vlgrp = grp;
 
@@ -1076,7 +1081,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
 		gfar_write(&priv->regs->rctrl, tempval);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 }
 
 
@@ -1085,12 +1090,12 @@ static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 	struct gfar_private *priv = netdev_priv(dev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	if (priv->vlgrp)
 		priv->vlgrp->vlan_devices[vid] = NULL;
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 }
 
 
@@ -1179,7 +1184,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
 	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
 
 	/* Lock priv */
-	spin_lock(&priv->lock);
+	spin_lock(&priv->txlock);
 	bdp = priv->dirty_tx;
 	while ((bdp->status & TXBD_READY) == 0) {
 		/* If dirty_tx and cur_tx are the same, then either the */
@@ -1224,7 +1229,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
 	else
 		gfar_write(&priv->regs->txic, 0);
 
-	spin_unlock(&priv->lock);
+	spin_unlock(&priv->txlock);
 
 	return IRQ_HANDLED;
 }
@@ -1305,9 +1310,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct gfar_private *priv = netdev_priv(dev);
-
 #ifdef CONFIG_GFAR_NAPI
 	u32 tempval;
+#else
+	unsigned long flags;
 #endif
 
 	/* Clear IEVENT, so rx interrupt isn't called again
@@ -1330,7 +1336,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 	}
 #else
 
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
 
 	/* If we are coalescing interrupts, update the timer */
@@ -1341,7 +1347,7 @@ irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
 	else
 		gfar_write(&priv->regs->rxic, 0);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 #endif
 
 	return IRQ_HANDLED;
@@ -1490,13 +1496,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 	/* Update the current rxbd pointer to be the next one */
 	priv->cur_rx = bdp;
 
-	/* If no packets have arrived since the
-	 * last one we processed, clear the IEVENT RX and
-	 * BSY bits so that another interrupt won't be
-	 * generated when we set IMASK */
-	if (bdp->status & RXBD_EMPTY)
-		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
-
 	return howmany;
 }
 
1502 1501
@@ -1516,7 +1515,7 @@ static int gfar_poll(struct net_device *dev, int *budget)
 	rx_work_limit -= howmany;
 	*budget -= howmany;
 
-	if (rx_work_limit >= 0) {
+	if (rx_work_limit > 0) {
 		netif_rx_complete(dev);
 
 		/* Clear the halt bit in RSTAT */
@@ -1533,7 +1532,8 @@ static int gfar_poll(struct net_device *dev, int *budget)
 			gfar_write(&priv->regs->rxic, 0);
 	}
 
-	return (rx_work_limit < 0) ? 1 : 0;
+	/* Return 1 if there's more work to do */
+	return (rx_work_limit > 0) ? 0 : 1;
 }
 #endif
 
@@ -1629,7 +1629,7 @@ static void adjust_link(struct net_device *dev)
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 	if (phydev->link) {
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -1694,7 +1694,7 @@ static void adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 }
 
 /* Update the hash table based on the current list of multicast
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index d37d5401be6e..127c98cf3336 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -656,43 +656,62 @@ struct gfar {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	/* pointers to arrays of skbuffs for tx and rx */
+	/* Fields controlled by TX lock */
+	spinlock_t txlock;
+
+	/* Pointer to the array of skbuffs */
 	struct sk_buff ** tx_skbuff;
-	struct sk_buff ** rx_skbuff;
 
-	/* indices pointing to the next free sbk in skb arrays */
+	/* next free skb in the array */
 	u16 skb_curtx;
-	u16 skb_currx;
 
-	/* index of the first skb which hasn't been transmitted
-	 * yet. */
+	/* First skb in line to be transmitted */
 	u16 skb_dirtytx;
 
 	/* Configuration info for the coalescing features */
 	unsigned char txcoalescing;
 	unsigned short txcount;
 	unsigned short txtime;
+
+	/* Buffer descriptor pointers */
+	struct txbd8 *tx_bd_base;	/* First tx buffer descriptor */
+	struct txbd8 *cur_tx;		/* Next free ring entry */
+	struct txbd8 *dirty_tx;		/* First buffer in line
+					   to be transmitted */
+	unsigned int tx_ring_size;
+
+	/* RX Locked fields */
+	spinlock_t rxlock;
+
+	/* skb array and index */
+	struct sk_buff ** rx_skbuff;
+	u16 skb_currx;
+
+	/* RX Coalescing values */
 	unsigned char rxcoalescing;
 	unsigned short rxcount;
 	unsigned short rxtime;
 
-	/* GFAR addresses */
-	struct rxbd8 *rx_bd_base;	/* Base addresses of Rx and Tx Buffers */
-	struct txbd8 *tx_bd_base;
+	struct rxbd8 *rx_bd_base;	/* First Rx buffers */
 	struct rxbd8 *cur_rx;		/* Next free rx ring entry */
-	struct txbd8 *cur_tx;		/* Next free ring entry */
-	struct txbd8 *dirty_tx;		/* The Ring entry to be freed. */
-	struct gfar __iomem *regs;	/* Pointer to the GFAR memory mapped Registers */
-	u32 __iomem *hash_regs[16];
-	int hash_width;
-	struct net_device_stats stats;	/* linux network statistics */
-	struct gfar_extra_stats extra_stats;
-	spinlock_t lock;
+
+	/* RX parameters */
+	unsigned int rx_ring_size;
 	unsigned int rx_buffer_size;
 	unsigned int rx_stash_size;
 	unsigned int rx_stash_index;
-	unsigned int tx_ring_size;
-	unsigned int rx_ring_size;
+
+	struct vlan_group *vlgrp;
+
+	/* Unprotected fields */
+	/* Pointer to the GFAR memory mapped Registers */
+	struct gfar __iomem *regs;
+
+	/* Hash registers and their width */
+	u32 __iomem *hash_regs[16];
+	int hash_width;
+
+	/* global parameters */
 	unsigned int fifo_threshold;
 	unsigned int fifo_starve;
 	unsigned int fifo_starve_off;
@@ -702,13 +721,15 @@ struct gfar_private {
 			extended_hash:1,
 			bd_stash_en:1;
 	unsigned short padding;
-	struct vlan_group *vlgrp;
-	/* Info structure initialized by board setup code */
+
 	unsigned int interruptTransmit;
 	unsigned int interruptReceive;
 	unsigned int interruptError;
+
+	/* info structure initialized by platform code */
 	struct gianfar_platform_data *einfo;
 
+	/* PHY stuff */
 	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
 	int oldspeed;
@@ -716,6 +737,10 @@ struct gfar_private {
 	int oldlink;
 
 	uint32_t msg_enable;
+
+	/* Network Statistics */
+	struct net_device_stats stats;
+	struct gfar_extra_stats extra_stats;
 };
 
 static inline u32 gfar_read(volatile unsigned __iomem *addr)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5de7b2e259dc..d69698c695ef 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -455,10 +455,14 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
 
 	/* Halt TX and RX, and process the frames which
 	 * have already been received */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	gfar_halt(dev);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Now we take down the rings to rebuild them */
 	stop_gfar(dev);
@@ -488,10 +492,14 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
 
 	/* Halt TX and RX, and process the frames which
 	 * have already been received */
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
+	spin_lock(&priv->rxlock);
+
 	gfar_halt(dev);
 	gfar_clean_rx_ring(dev, priv->rx_ring_size);
-	spin_unlock_irqrestore(&priv->lock, flags);
+
+	spin_unlock(&priv->rxlock);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	/* Now we take down the rings to rebuild them */
 	stop_gfar(dev);
@@ -523,7 +531,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
 	if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
 		return -EOPNOTSUPP;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 	gfar_halt(dev);
 
 	if (data)
@@ -532,7 +540,7 @@ static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
 		dev->features &= ~NETIF_F_IP_CSUM;
 
 	gfar_start(dev);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return 0;
 }
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 51ef181b1368..a6d5c43199cb 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -82,7 +82,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
 	else
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 
 	/* Set the new stashing value */
 	priv->bd_stash_en = new_setting;
@@ -96,7 +96,7 @@ static ssize_t gfar_set_bd_stash(struct class_device *cdev,
 
 	gfar_write(&priv->regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -118,7 +118,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
 	u32 temp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	if (length > priv->rx_buffer_size)
 		return count;
 
@@ -142,7 +142,7 @@ static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
 
 	gfar_write(&priv->regs->attr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -166,7 +166,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
 	u32 temp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->rxlock, flags);
 	if (index > priv->rx_stash_size)
 		return count;
 
@@ -180,7 +180,7 @@ static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
 	temp |= ATTRELI_EI(index);
 	gfar_write(&priv->regs->attreli, flags);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->rxlock, flags);
 
 	return count;
 }
@@ -205,7 +205,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
 	if (length > GFAR_MAX_FIFO_THRESHOLD)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_threshold = length;
 
@@ -214,7 +214,7 @@ static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
 	temp |= length;
 	gfar_write(&priv->regs->fifo_tx_thr, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return count;
 }
@@ -240,7 +240,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
 	if (num > GFAR_MAX_FIFO_STARVE)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_starve = num;
 
@@ -249,7 +249,7 @@ static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
 	temp |= num;
 	gfar_write(&priv->regs->fifo_tx_starve, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return count;
 }
@@ -274,7 +274,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
 	if (num > GFAR_MAX_FIFO_STARVE_OFF)
 		return count;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&priv->txlock, flags);
 
 	priv->fifo_starve_off = num;
 
@@ -283,7 +283,7 @@ static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
 	temp |= num;
 	gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&priv->txlock, flags);
 
 	return count;
289} 289}