author		Dale Farnsworth <dale@farnsworth.org>	2006-01-27 03:09:18 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2006-01-27 11:11:16 -0500
commit		f98e36f1f7903a319f7f87f96490e88f691ea106 (patch)
tree		cdad08d70b682bc36225a0a74cb5f49101bef6a3 /drivers/net/mv643xx_eth.c
parent		cf4086c7725dc251551243c28325d446d9b1bf06 (diff)
[PATCH] mv643xx_eth: Rename mp->tx_ring_skbs to mp->tx_desc_count
tx_ring_skbs is actually a count of tx descriptors currently in use.
Since there may be multiple descriptors per skb, it is not the same as
the number of skbs in the ring.  Also change rx_ring_skbs to
rx_desc_count to be consistent.

Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	51
1 files changed, 26 insertions, 25 deletions
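
Background for the rename, as an aside: a scatter-gather skb consumes one
tx descriptor for its linear data plus one per page fragment, so a count
of skbs understates ring occupancy. The sketch below is illustrative
only; the helper name is invented here and is not part of the patch or
the driver.

#include <linux/skbuff.h>

/*
 * Hypothetical helper, for illustration only: how many tx descriptors
 * one skb occupies. The linear data takes one descriptor and each page
 * fragment takes another. This is the nr_frags + 1 headroom the driver
 * checks before queueing an skb, and why tx_desc_count can exceed the
 * number of skbs in the ring.
 */
static int tx_descs_for_skb(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 1;
}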
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 4d5e3b8e7578..73c766b3cd11 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -169,11 +169,11 @@ static void mv643xx_eth_rx_task(void *data)
 	if (test_and_set_bit(0, &mp->rx_task_busy))
 		panic("%s: Error in test_set_bit / clear_bit", dev->name);
 
-	while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
+	while (mp->rx_desc_count < (mp->rx_ring_size - 5)) {
 		skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
 		if (!skb)
 			break;
-		mp->rx_ring_skbs++;
+		mp->rx_desc_count++;
 		unaligned = (u32)skb->data & (DMA_ALIGN - 1);
 		if (unaligned)
 			skb_reserve(skb, DMA_ALIGN - unaligned);
@@ -194,7 +194,7 @@ static void mv643xx_eth_rx_task(void *data)
 	 * If RX ring is empty of SKB, set a timer to try allocating
 	 * again in a later time .
 	 */
-	if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) {
+	if ((mp->rx_desc_count == 0) && (mp->rx_timer_flag == 0)) {
 		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
 		/* After 100mSec */
 		mp->timeout.expires = jiffies + (HZ / 10);
@@ -394,7 +394,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
 #else
 	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
 #endif
-		mp->rx_ring_skbs--;
+		mp->rx_desc_count--;
 		received_packets++;
 
 		/* Update statistics. Note byte count includes 4 byte CRC count */
@@ -494,7 +494,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 		/* UDP change : We may need this */
 		if ((eth_int_cause_ext & 0x0000ffff) &&
 		    (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
-		    (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+		    (mp->tx_ring_size > mp->tx_desc_count + MAX_DESCS_PER_SKB))
 			netif_wake_queue(dev);
 #ifdef MV643XX_NAPI
 	} else {
@@ -778,7 +778,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 	}
 
 	/* Allocate TX ring */
-	mp->tx_ring_skbs = 0;
+	mp->tx_desc_count = 0;
 	size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
 	mp->tx_desc_area_size = size;
 
@@ -803,7 +803,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 	ether_init_tx_desc_ring(mp);
 
 	/* Allocate RX ring */
-	mp->rx_ring_skbs = 0;
+	mp->rx_desc_count = 0;
 	size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
 	mp->rx_desc_area_size = size;
 
@@ -880,17 +880,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
 	/* Free outstanding skb's on TX rings */
-	for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
+	for (curr = 0; mp->tx_desc_count && curr < mp->tx_ring_size; curr++) {
 		skb = mp->tx_skb[curr];
 		if (skb) {
-			mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
+			mp->tx_desc_count -= skb_shinfo(skb)->nr_frags;
 			dev_kfree_skb(skb);
-			mp->tx_ring_skbs--;
+			mp->tx_desc_count--;
 		}
 	}
-	if (mp->tx_ring_skbs)
+	if (mp->tx_desc_count)
 		printk("%s: Error on Tx descriptor free - could not free %d"
-		       " descriptors\n", dev->name, mp->tx_ring_skbs);
+		       " descriptors\n", dev->name, mp->tx_desc_count);
 
 	/* Free TX ring */
 	if (mp->tx_sram_size)
@@ -910,18 +910,18 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
 	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
 	/* Free preallocated skb's on RX rings */
-	for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) {
+	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
 		if (mp->rx_skb[curr]) {
 			dev_kfree_skb(mp->rx_skb[curr]);
-			mp->rx_ring_skbs--;
+			mp->rx_desc_count--;
 		}
 	}
 
-	if (mp->rx_ring_skbs)
+	if (mp->rx_desc_count)
 		printk(KERN_ERR
 		       "%s: Error in freeing Rx Ring. %d skb's still"
 		       " stuck in RX Ring - ignoring them\n", dev->name,
-		       mp->rx_ring_skbs);
+		       mp->rx_desc_count);
 	/* Free RX ring */
 	if (mp->rx_sram_size)
 		iounmap(mp->p_rx_desc_area);
@@ -991,7 +991,8 @@ static void mv643xx_tx(struct net_device *dev)
 	}
 
 	if (netif_queue_stopped(dev) &&
-	    mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)
+	    mp->tx_ring_size >
+	    mp->tx_desc_count + MAX_DESCS_PER_SKB)
 		netif_wake_queue(dev);
 }
 
@@ -1083,7 +1084,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* This is a hard error, log it. */
-	if ((mp->tx_ring_size - mp->tx_ring_skbs) <=
+	if ((mp->tx_ring_size - mp->tx_desc_count) <=
 	    (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
 		printk(KERN_ERR
@@ -1260,7 +1261,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Check if TX queue can handle another skb. If not, then
 	 * signal higher layers to stop requesting TX
 	 */
-	if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB))
+	if (mp->tx_ring_size <= (mp->tx_desc_count + MAX_DESCS_PER_SKB))
 		/*
 		 * Stop getting skb's from upper layers.
 		 * Getting skb's from upper layers will be enabled again after
@@ -2563,8 +2564,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
 		return ETH_ERROR;
 	}
 
-	mp->tx_ring_skbs++;
-	BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
+	mp->tx_desc_count++;
+	BUG_ON(mp->tx_desc_count > mp->tx_ring_size);
 
 	/* Get the Tx Desc ring indexes */
 	tx_desc_curr = mp->tx_curr_desc_q;
@@ -2632,8 +2633,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
 	if (mp->tx_resource_err)
 		return ETH_QUEUE_FULL;
 
-	mp->tx_ring_skbs++;
-	BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
+	mp->tx_desc_count++;
+	BUG_ON(mp->tx_desc_count > mp->tx_ring_size);
 
 	/* Get the Tx Desc ring indexes */
 	tx_desc_curr = mp->tx_curr_desc_q;
@@ -2747,8 +2748,8 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
 	/* Any Tx return cancels the Tx resource error status */
 	mp->tx_resource_err = 0;
 
-	BUG_ON(mp->tx_ring_skbs == 0);
-	mp->tx_ring_skbs--;
+	BUG_ON(mp->tx_desc_count == 0);
+	mp->tx_desc_count--;
 
 out:
 	spin_unlock_irqrestore(&mp->lock, flags);
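
A closing observation on the wake/stop thresholds touched above, as a
hedged sketch (the helper name is invented here, not from the patch):
the queue is stopped when a worst-case skb might not fit, and woken only
once MAX_DESCS_PER_SKB descriptors are free again, giving simple
hysteresis around the ring-full condition.

/*
 * Hypothetical predicate, for illustration only: room for a worst-case
 * skb. Mirrors the checks above that gate netif_wake_queue() and
 * netif_stop_queue() on MAX_DESCS_PER_SKB free descriptors.
 */
static int tx_ring_has_headroom(struct mv643xx_private *mp)
{
	return mp->tx_ring_size > mp->tx_desc_count + MAX_DESCS_PER_SKB;
}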