author		Dale Farnsworth <dale@farnsworth.org>	2006-01-16 18:56:30 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2006-01-17 07:23:38 -0500
commit		8f5187035ad475c90ca865318daa09ba43bc3e68
tree		43b304d094d8fec20a2589239f56ff31bc4aa8d1 /drivers/net/mv643xx_eth.c
parent		dd09b1de08b28ccfb130ca97d617dc3283165d22
[PATCH] mv643xx_eth: Hold spinlocks only where needed
This driver has historically held a spin_lock during the entire open
and stop functions and while receiving multiple packets. This is
unnecessarily long and holds locks during calls that may sleep. This
patch reduces the size of the windows where locks are held.

Signed-off-by: Dale Farnsworth <dale@farnsworth.org>

 mv643xx_eth.c | 172 ++++++++++++++++++++++++++++++---------------------------
 1 file changed, 91 insertions(+), 81 deletions(-)

Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	170
1 file changed, 90 insertions(+), 80 deletions(-)
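
The pattern the patch applies is easiest to see in isolation. Below is a
minimal sketch (illustrative names, not code from this driver) of the rule
being enforced: hold the spinlock only around accesses to shared state, and
make any call that may sleep outside the lock.

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
        static int shared_count;                /* hypothetical shared state */

        static void update_shared_state(void)
        {
                unsigned long flags;

                /* Hold the lock only while touching the shared data... */
                spin_lock_irqsave(&example_lock, flags);
                shared_count++;
                spin_unlock_irqrestore(&example_lock, flags);

                /* ...and keep possibly-sleeping calls outside it, e.g.
                 * request_irq(), kmalloc(GFP_KERNEL), msleep(). */
        }
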
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index f632323fbf06..f6d4ea175e11 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -129,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
  */
 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 {
-        struct mv643xx_private *mp = netdev_priv(dev);
-        unsigned long flags;
-
-        spin_lock_irqsave(&mp->lock, flags);
-
-        if ((new_mtu > 9500) || (new_mtu < 64)) {
-                spin_unlock_irqrestore(&mp->lock, flags);
+        if ((new_mtu > 9500) || (new_mtu < 64))
                 return -EINVAL;
-        }
 
         dev->mtu = new_mtu;
         /*
@@ -157,7 +150,6 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
                                 dev->name);
         }
 
-        spin_unlock_irqrestore(&mp->lock, flags);
         return 0;
 }
 
@@ -353,8 +345,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
         if (!(eth_int_cause_ext & (BIT0 | BIT8)))
                 return released;
 
-        spin_lock(&mp->lock);
-
         /* Check only queue 0 */
         while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                 if (pkt_info.cmd_sts & BIT0) {
@@ -377,8 +367,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
                 }
         }
 
-        spin_unlock(&mp->lock);
-
         return released;
 }
 
@@ -518,6 +506,8 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
                 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
                 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
                                                         (port_num), 0);
+                /* ensure previous writes have taken effect */
+                mv_read(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num));
                 __netif_rx_schedule(dev);
         }
 #else
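
The mv_read() calls added above (and repeated in real_stop() further down)
are the usual fix for posted MMIO writes: the bus may buffer a write, so
reading a register on the same device forces the write out before the CPU
proceeds. A sketch of the idiom, using the driver's own mv_write()/mv_read()
accessors on a hypothetical register offset:

        /* Illustrative helper; example_mask_reg stands in for a real
         * register offset such as MV643XX_ETH_INTERRUPT_MASK_REG(port). */
        static inline void mask_irqs_and_flush(int example_mask_reg)
        {
                mv_write(example_mask_reg, 0);  /* may be posted by the bus */
                mv_read(example_mask_reg);      /* read back forces completion */
        }
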
@@ -533,6 +523,9 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
         /* Unmask all interrupts on ethernet port */
         mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
                                                 INT_CAUSE_MASK_ALL);
+        /* wait for previous write to take effect */
+        mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
         queue_task(&mp->rx_task, &tq_immediate);
         mark_bh(IMMEDIATE_BH);
 #else
@@ -657,34 +650,20 @@ static int mv643xx_eth_open(struct net_device *dev)
         unsigned int port_num = mp->port_num;
         int err;
 
-        spin_lock_irq(&mp->lock);
-
         err = request_irq(dev->irq, mv643xx_eth_int_handler,
                         SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
-
         if (err) {
                 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
                                                                 port_num);
-                err = -EAGAIN;
-                goto out;
+                return -EAGAIN;
         }
 
         if (mv643xx_eth_real_open(dev)) {
                 printk("%s: Error opening interface\n", dev->name);
+                free_irq(dev->irq, dev);
                 err = -EBUSY;
-                goto out_free;
         }
 
-        spin_unlock_irq(&mp->lock);
-
-        return 0;
-
-out_free:
-        free_irq(dev->irq, dev);
-
-out:
-        spin_unlock_irq(&mp->lock);
-
         return err;
 }
 
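
The old open() took mp->lock around request_irq(), which allocates memory
with GFP_KERNEL and can therefore sleep; sleeping with a spinlock held and
interrupts disabled is a bug. With the lock gone, the error paths also
collapse: free_irq() moves inline and the out/out_free labels disappear.
A sketch of the hazard that was removed (illustrative, not the driver's
code; the 2.6.15-era handler signature and SA_SHIRQ flag are assumed):

        #include <linux/interrupt.h>
        #include <linux/spinlock.h>

        static int broken_open_sketch(spinlock_t *lock, unsigned int irq,
                                      irqreturn_t (*handler)(int, void *,
                                                             struct pt_regs *),
                                      void *dev_id)
        {
                int err;

                spin_lock_irq(lock);
                /* BUG: request_irq() may sleep here, while we are atomic
                 * with interrupts disabled. */
                err = request_irq(irq, handler, SA_SHIRQ, "example", dev_id);
                spin_unlock_irq(lock);

                return err;
        }
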
@@ -790,18 +769,6 @@ static int mv643xx_eth_real_open(struct net_device *dev)
         /* Stop RX Queues */
         mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
-        /* Clear the ethernet port interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-        /* Unmask RX buffer and TX end interrupt */
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                                INT_CAUSE_UNMASK_ALL);
-
-        /* Unmask phy and link status changes interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                                INT_CAUSE_UNMASK_ALL_EXT);
-
         /* Set the MAC Address */
         memcpy(mp->port_mac_addr, dev->dev_addr, 6);
 
@@ -903,8 +870,17 @@ static int mv643xx_eth_real_open(struct net_device *dev)
         mp->tx_int_coal =
                 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
 
-        netif_start_queue(dev);
+        /* Clear any pending ethernet port interrupts */
+        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+        /* Unmask phy and link status changes interrupts */
+        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+                                                INT_CAUSE_UNMASK_ALL_EXT);
 
+        /* Unmask RX buffer and TX end interrupt */
+        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+                                                INT_CAUSE_UNMASK_ALL);
         return 0;
 }
 
@@ -983,37 +959,38 @@ static int mv643xx_eth_real_stop(struct net_device *dev)
         struct mv643xx_private *mp = netdev_priv(dev);
         unsigned int port_num = mp->port_num;
 
+        /* Mask RX buffer and TX end interrupt */
+        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
+
+        /* Mask phy and link status changes interrupts */
+        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+
+        /* ensure previous writes have taken effect */
+        mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+#ifdef MV643XX_NAPI
+        netif_poll_disable(dev);
+#endif
         netif_carrier_off(dev);
         netif_stop_queue(dev);
 
-        mv643xx_eth_free_tx_rings(dev);
-        mv643xx_eth_free_rx_rings(dev);
-
         eth_port_reset(mp->port_num);
 
-        /* Disable ethernet port interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-        mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-        /* Mask RX buffer and TX end interrupt */
-        mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
+        mv643xx_eth_free_tx_rings(dev);
+        mv643xx_eth_free_rx_rings(dev);
 
-        /* Mask phy and link status changes interrupts */
-        mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+#ifdef MV643XX_NAPI
+        netif_poll_enable(dev);
+#endif
 
         return 0;
 }
 
 static int mv643xx_eth_stop(struct net_device *dev)
 {
-        struct mv643xx_private *mp = netdev_priv(dev);
-
-        spin_lock_irq(&mp->lock);
-
         mv643xx_eth_real_stop(dev);
 
         free_irq(dev->irq, dev);
-        spin_unlock_irq(&mp->lock);
 
         return 0;
 }
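
Note that the stop path is reordered as well as unlocked: interrupts are
masked and the writes flushed first, any in-flight poll is drained, the port
DMA is reset, and only then are the rings freed, so nothing can touch a ring
while it is being torn down. The shape of the new ordering, as a sketch
built from the driver's own helpers shown above:

        static void stop_order_sketch(struct net_device *dev, unsigned int port)
        {
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port), 0);
                mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port), 0);
                mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port)); /* flush writes */

        #ifdef MV643XX_NAPI
                netif_poll_disable(dev);        /* wait out a running poll */
        #endif
                netif_carrier_off(dev);
                netif_stop_queue(dev);

                eth_port_reset(port);           /* stop RX/TX DMA in hardware */

                mv643xx_eth_free_tx_rings(dev); /* only now safe to free */
                mv643xx_eth_free_rx_rings(dev);
        }
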
@@ -1053,14 +1030,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
         struct mv643xx_private *mp = netdev_priv(dev);
         int done = 1, orig_budget, work_done;
         unsigned int port_num = mp->port_num;
-        unsigned long flags;
 
 #ifdef MV643XX_TX_FAST_REFILL
         if (++mp->tx_clean_threshold > 5) {
-                spin_lock_irqsave(&mp->lock, flags);
                 mv643xx_tx(dev);
                 mp->tx_clean_threshold = 0;
-                spin_unlock_irqrestore(&mp->lock, flags);
         }
 #endif
 
@@ -1078,15 +1052,13 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
         }
 
         if (done) {
-                spin_lock_irqsave(&mp->lock, flags);
-                __netif_rx_complete(dev);
+                netif_rx_complete(dev);
                 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
                 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
                 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
                                                 INT_CAUSE_UNMASK_ALL);
                 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
                                                 INT_CAUSE_UNMASK_ALL_EXT);
-                spin_unlock_irqrestore(&mp->lock, flags);
         }
 
         return done ? 0 : 1;
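
Dropping the lock here works because the poll path switches from
__netif_rx_complete(), which requires the caller to already have interrupts
disabled, to plain netif_rx_complete(), which in kernels of this era handled
that itself. Roughly (an approximation from memory; see the period's
include/linux/netdevice.h for the real definition):

        static inline void netif_rx_complete_sketch(struct net_device *dev)
        {
                unsigned long flags;

                local_irq_save(flags);
                __netif_rx_complete(dev);
                local_irq_restore(flags);
        }
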
@@ -2687,6 +2659,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
         struct eth_tx_desc *current_descriptor;
         struct eth_tx_desc *first_descriptor;
         u32 command;
+        unsigned long flags;
 
         /* Do not process Tx ring in case of Tx ring resource error */
         if (mp->tx_resource_err)
@@ -2703,6 +2676,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
                 return ETH_ERROR;
         }
 
+        spin_lock_irqsave(&mp->lock, flags);
+
         mp->tx_ring_skbs++;
         BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
 
@@ -2752,11 +2727,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
                 mp->tx_resource_err = 1;
                 mp->tx_curr_desc_q = tx_first_desc;
 
+                spin_unlock_irqrestore(&mp->lock, flags);
+
                 return ETH_QUEUE_LAST_RESOURCE;
         }
 
         mp->tx_curr_desc_q = tx_next_desc;
 
+        spin_unlock_irqrestore(&mp->lock, flags);
+
         return ETH_OK;
 }
 #else
@@ -2767,11 +2746,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
         int tx_desc_used;
         struct eth_tx_desc *current_descriptor;
         unsigned int command_status;
+        unsigned long flags;
 
         /* Do not process Tx ring in case of Tx ring resource error */
         if (mp->tx_resource_err)
                 return ETH_QUEUE_FULL;
 
+        spin_lock_irqsave(&mp->lock, flags);
+
         mp->tx_ring_skbs++;
         BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
 
@@ -2802,9 +2784,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
         /* Check for ring index overlap in the Tx desc ring */
         if (tx_desc_curr == tx_desc_used) {
                 mp->tx_resource_err = 1;
+
+                spin_unlock_irqrestore(&mp->lock, flags);
                 return ETH_QUEUE_LAST_RESOURCE;
         }
 
+        spin_unlock_irqrestore(&mp->lock, flags);
         return ETH_OK;
 }
 #endif
@@ -2827,23 +2812,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
  *	Tx ring 'first' and 'used' indexes are updated.
  *
  * RETURN:
- *	ETH_ERROR in case the routine can not access Tx desc ring.
- *	ETH_RETRY in case there is transmission in process.
- *	ETH_END_OF_JOB if the routine has nothing to release.
- *	ETH_OK otherwise.
+ *	ETH_OK on success
+ *	ETH_ERROR otherwise.
  *
  */
 static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
                                         struct pkt_info *p_pkt_info)
 {
         int tx_desc_used;
+        int tx_busy_desc;
+        struct eth_tx_desc *p_tx_desc_used;
+        unsigned int command_status;
+        unsigned long flags;
+        int err = ETH_OK;
+
+        spin_lock_irqsave(&mp->lock, flags);
+
 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
-        int tx_busy_desc = mp->tx_first_desc_q;
+        tx_busy_desc = mp->tx_first_desc_q;
 #else
-        int tx_busy_desc = mp->tx_curr_desc_q;
+        tx_busy_desc = mp->tx_curr_desc_q;
 #endif
-        struct eth_tx_desc *p_tx_desc_used;
-        unsigned int command_status;
 
         /* Get the Tx Desc ring indexes */
         tx_desc_used = mp->tx_used_desc_q;
@@ -2851,18 +2840,24 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
         p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
 
         /* Sanity check */
-        if (p_tx_desc_used == NULL)
-                return ETH_ERROR;
+        if (p_tx_desc_used == NULL) {
+                err = ETH_ERROR;
+                goto out;
+        }
 
         /* Stop release. About to overlap the current available Tx descriptor */
-        if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
-                return ETH_END_OF_JOB;
+        if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
+                err = ETH_ERROR;
+                goto out;
+        }
 
         command_status = p_tx_desc_used->cmd_sts;
 
         /* Still transmitting... */
-        if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
-                return ETH_RETRY;
+        if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+                err = ETH_ERROR;
+                goto out;
+        }
 
         /* Pass the packet information to the caller */
         p_pkt_info->cmd_sts = command_status;
@@ -2880,7 +2875,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
         BUG_ON(mp->tx_ring_skbs == 0);
         mp->tx_ring_skbs--;
 
-        return ETH_OK;
+out:
+        spin_unlock_irqrestore(&mp->lock, flags);
+
+        return err;
 }
 
 /*
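
With the lock now taken inside eth_tx_return_desc(), every early return
above becomes a goto to a single exit, so the lock and unlock always pair no
matter which check fails, and the return codes collapse to ETH_OK/ETH_ERROR
as the updated comment block says. The generic shape of that idiom, with
illustrative names:

        static int locked_op_sketch(spinlock_t *lock, int bad_a, int bad_b)
        {
                unsigned long flags;
                int err = 0;

                spin_lock_irqsave(lock, flags);

                if (bad_a) {
                        err = -EIO;
                        goto out;
                }
                if (bad_b) {
                        err = -EIO;
                        goto out;
                }
                /* ... do the real work under the lock ... */
out:
                spin_unlock_irqrestore(lock, flags);
                return err;
        }
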
@@ -2912,11 +2910,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
         int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
         volatile struct eth_rx_desc *p_rx_desc;
         unsigned int command_status;
+        unsigned long flags;
 
         /* Do not process Rx ring in case of Rx ring resource error */
         if (mp->rx_resource_err)
                 return ETH_QUEUE_FULL;
 
+        spin_lock_irqsave(&mp->lock, flags);
+
         /* Get the Rx Desc ring 'curr and 'used' indexes */
         rx_curr_desc = mp->rx_curr_desc_q;
         rx_used_desc = mp->rx_used_desc_q;
@@ -2928,8 +2929,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
         rmb();
 
         /* Nothing to receive... */
-        if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+        if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+                spin_unlock_irqrestore(&mp->lock, flags);
                 return ETH_END_OF_JOB;
+        }
 
         p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
         p_pkt_info->cmd_sts = command_status;
@@ -2949,6 +2952,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
         if (rx_next_curr_desc == rx_used_desc)
                 mp->rx_resource_err = 1;
 
+        spin_unlock_irqrestore(&mp->lock, flags);
+
         return ETH_OK;
 }
 
@@ -2977,6 +2982,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
 {
         int used_rx_desc;	/* Where to return Rx resource */
         volatile struct eth_rx_desc *p_used_rx_desc;
+        unsigned long flags;
+
+        spin_lock_irqsave(&mp->lock, flags);
 
         /* Get 'used' Rx descriptor */
         used_rx_desc = mp->rx_used_desc_q;
@@ -3000,6 +3008,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
         /* Any Rx return cancels the Rx resource error status */
         mp->rx_resource_err = 0;
 
+        spin_unlock_irqrestore(&mp->lock, flags);
+
         return ETH_OK;
 }
 