author	Dale Farnsworth <dale@farnsworth.org>	2006-03-03 12:02:51 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-03-03 12:12:36 -0500
commit	ff561eef9fb37c7180085e08418acfc009a9ada7 (patch)
tree	97010ae360fdb25238698ad811c22f6d901e1fde
parent	c8aaea25e0b069e9572caa74f984e109899c1765 (diff)
[PATCH] mv643xx_eth: Refactor/clean up tx queue handling
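In short (summarized from the diff below): the three separate tx reclaim paths -- mv643xx_eth_free_tx_queue() in the interrupt handler, mv643xx_tx() in the NAPI poll path, and the open-coded loop in mv643xx_eth_free_tx_rings() -- are replaced by a single helper, mv643xx_eth_free_tx_descs(dev, force), plus two thin wrappers. Roughly, the call sites end up as follows (a sketch condensed from the hunks below; locking and error handling elided):

	/* interrupt handler: tx-done cause bits set */
	if (eth_int_cause_ext & (BIT0 | BIT8))
		mv643xx_eth_free_completed_tx_descs(dev);	/* free_tx_descs(dev, 0), then wake queue */

	/* NAPI poll (MV643XX_TX_FAST_REFILL): reclaim every few passes */
	if (++mp->tx_clean_threshold > 5) {
		mv643xx_eth_free_completed_tx_descs(dev);
		mp->tx_clean_threshold = 0;
	}

	/* ring teardown: force-free anything still outstanding */
	mv643xx_eth_free_all_tx_descs(dev);	/* free_tx_descs(dev, 1) */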
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/net/mv643xx_eth.c	257
-rw-r--r--	drivers/net/mv643xx_eth.h	4
2 files changed, 84 insertions, 177 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 260be8048d3c..8a24b39f3ccb 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -62,6 +62,9 @@
 #define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
 #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
 
+#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
+#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */
+
 #define INT_UNMASK_ALL 0x0007ffff
 #define INT_UNMASK_ALL_EXT 0x0011ffff
 #define INT_MASK_ALL 0x00000000
@@ -333,49 +336,78 @@ static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
 	netif_device_attach(dev);
 }
 
-/*
- * mv643xx_eth_free_tx_queue
- *
- * Input : dev - a pointer to the required interface
- *
- * Output : 0 if was able to release skb , nonzero otherwise
- */
-static int mv643xx_eth_free_tx_queue(struct net_device *dev,
-					unsigned int eth_int_cause_ext)
+/**
+ * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
+ *
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &mp->stats;
-	struct pkt_info pkt_info;
-	int released = 1;
+	struct eth_tx_desc *desc;
+	u32 cmd_sts;
+	struct sk_buff *skb;
+	unsigned long flags;
+	int tx_index;
+	dma_addr_t addr;
+	int count;
+	int released = 0;
 
-	if (!(eth_int_cause_ext & (BIT0 | BIT8)))
-		return released;
+	while (mp->tx_desc_count > 0) {
+		spin_lock_irqsave(&mp->lock, flags);
+		tx_index = mp->tx_used_desc_q;
+		desc = &mp->p_tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+
+		if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
+			spin_unlock_irqrestore(&mp->lock, flags);
+			return released;
+		}
+
+		mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
+		mp->tx_desc_count--;
+
+		addr = desc->buf_ptr;
+		count = desc->byte_cnt;
+		skb = mp->tx_skb[tx_index];
+		if (skb)
+			mp->tx_skb[tx_index] = NULL;
+
+		spin_unlock_irqrestore(&mp->lock, flags);
 
-	/* Check only queue 0 */
-	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
-		if (pkt_info.cmd_sts & BIT0) {
+		if (cmd_sts & BIT0) {
 			printk("%s: Error in TX\n", dev->name);
-			stats->tx_errors++;
+			mp->stats.tx_errors++;
 		}
 
-		if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
-			dma_unmap_single(NULL, pkt_info.buf_ptr,
-					pkt_info.byte_cnt,
-					DMA_TO_DEVICE);
+		if (cmd_sts & ETH_TX_FIRST_DESC)
+			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
 		else
-			dma_unmap_page(NULL, pkt_info.buf_ptr,
-					pkt_info.byte_cnt,
-					DMA_TO_DEVICE);
+			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
 
-		if (pkt_info.return_info) {
-			dev_kfree_skb_irq(pkt_info.return_info);
-			released = 0;
-		}
+		if (skb)
+			dev_kfree_skb_irq(skb);
+
+		released = 1;
 	}
 
 	return released;
 }
 
+static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
+{
+	struct mv643xx_private *mp = netdev_priv(dev);
+
+	if (mv643xx_eth_free_tx_descs(dev, 0) &&
+	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
+		netif_wake_queue(dev);
+}
+
+static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
+{
+	mv643xx_eth_free_tx_descs(dev, 1);
+}
+
 /*
  * mv643xx_eth_receive
 *
@@ -547,15 +579,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 		 */
 		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
 							~eth_int_cause);
-		if (eth_int_cause_ext != 0x0)
+		if (eth_int_cause_ext != 0x0) {
 			mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
 					(port_num), ~eth_int_cause_ext);
-
-		/* UDP change : We may need this */
-		if ((eth_int_cause_ext & 0x0000ffff) &&
-		    (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
-		    (mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB))
-			netif_wake_queue(dev);
+			/* UDP change : We may need this */
+			if (eth_int_cause_ext & (BIT0 | BIT8))
+				mv643xx_eth_free_completed_tx_descs(dev);
+		}
 #ifdef MV643XX_NAPI
 	} else {
 		if (netif_rx_schedule_prep(dev)) {
@@ -596,14 +626,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 		if (mii_link_ok(&mp->mii)) {
 			mii_ethtool_gset(&mp->mii, &cmd);
 			mv643xx_eth_update_pscr(dev, &cmd);
+			mv643xx_eth_port_enable_tx(port_num,
+						ETH_TX_QUEUES_ENABLED);
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
-				if (mp->tx_ring_size - mp->tx_desc_count >
-						MAX_DESCS_PER_SKB) {
+				if (mp->tx_ring_size - mp->tx_desc_count >=
+						MAX_DESCS_PER_SKB)
 					netif_wake_queue(dev);
-					/* Start TX queue */
-					mv643xx_eth_port_enable_tx(port_num, mp->port_tx_queue_command);
-				}
 			}
 		} else if (netif_carrier_ok(dev)) {
 			netif_stop_queue(dev);
@@ -735,9 +764,6 @@ static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
 	mp->rx_used_desc_q = 0;
 
 	mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
-
-	/* Enable queue 0 for this port */
-	mp->port_rx_queue_command = 1;
 }
 
 /*
@@ -779,9 +805,6 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
 	mp->tx_used_desc_q = 0;
 
 	mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
-
-	/* Enable queue 0 for this port */
-	mp->port_tx_queue_command = 1;
 }
 
 static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -963,25 +986,14 @@ out_free_irq:
 static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 {
 	struct mv643xx_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
-	unsigned int curr;
-	struct sk_buff *skb;
 
 	/* Stop Tx Queues */
-	mv643xx_eth_port_disable_tx(port_num);
+	mv643xx_eth_port_disable_tx(mp->port_num);
 
-	/* Free outstanding skb's on TX rings */
-	for (curr = 0; mp->tx_desc_count && curr < mp->tx_ring_size; curr++) {
-		skb = mp->tx_skb[curr];
-		if (skb) {
-			mp->tx_desc_count -= skb_shinfo(skb)->nr_frags;
-			dev_kfree_skb(skb);
-			mp->tx_desc_count--;
-		}
-	}
-	if (mp->tx_desc_count)
-		printk("%s: Error on Tx descriptor free - could not free %d"
-				" descriptors\n", dev->name, mp->tx_desc_count);
+	/* Free outstanding skb's on TX ring */
+	mv643xx_eth_free_all_tx_descs(dev);
+
+	BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
 
 	/* Free TX ring */
 	if (mp->tx_sram_size)
@@ -1062,30 +1074,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 }
 
 #ifdef MV643XX_NAPI
-static void mv643xx_tx(struct net_device *dev)
-{
-	struct mv643xx_private *mp = netdev_priv(dev);
-	struct pkt_info pkt_info;
-
-	while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
-		if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
-			dma_unmap_single(NULL, pkt_info.buf_ptr,
-					pkt_info.byte_cnt,
-					DMA_TO_DEVICE);
-		else
-			dma_unmap_page(NULL, pkt_info.buf_ptr,
-					pkt_info.byte_cnt,
-					DMA_TO_DEVICE);
-
-		if (pkt_info.return_info)
-			dev_kfree_skb_irq(pkt_info.return_info);
-	}
-
-	if (netif_queue_stopped(dev) &&
-	    mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB)
-		netif_wake_queue(dev);
-}
-
 /*
  * mv643xx_poll
 *
@@ -1099,7 +1087,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
 
 #ifdef MV643XX_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
-		mv643xx_tx(dev);
+		mv643xx_eth_free_completed_tx_descs(dev);
 		mp->tx_clean_threshold = 0;
 	}
 #endif
@@ -1156,11 +1144,9 @@ static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
 {
 	int tx_desc_curr;
 
-	tx_desc_curr = mp->tx_curr_desc_q;
-
 	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
-	mp->tx_desc_count++;
 
+	tx_desc_curr = mp->tx_curr_desc_q;
 	mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
 
 	BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
@@ -1180,7 +1166,6 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
 	int frag;
 	int tx_index;
 	struct eth_tx_desc *desc;
-	struct net_device_stats *stats = &mp->stats;
 
 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
@@ -1205,7 +1190,6 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
 						this_frag->page_offset,
 						this_frag->size,
 						DMA_TO_DEVICE);
-		stats->tx_bytes += this_frag->size;
 	}
 }
 
@@ -1215,21 +1199,21 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
  * Ensure the data for an skb to be transmitted is mapped properly,
  * then fill in descriptors in the tx hw queue and start the hardware.
  */
-static int eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
+static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 					struct sk_buff *skb)
 {
 	int tx_index;
 	struct eth_tx_desc *desc;
 	u32 cmd_sts;
 	int length;
-	int tx_bytes = 0;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
 
 	cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
 
 	tx_index = eth_alloc_tx_desc_index(mp);
 	desc = &mp->p_tx_desc_area[tx_index];
 
-	if (skb_shinfo(skb)->nr_frags) {
+	if (nr_frags) {
 		eth_tx_fill_frag_descs(mp, skb);
 
 		length = skb_headlen(skb);
@@ -1244,7 +1228,6 @@ static int eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 
 	desc->byte_cnt = length;
 	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
-	tx_bytes += length;
 
 	if (skb->ip_summed == CHECKSUM_HW) {
 		BUG_ON(skb->protocol != ETH_P_IP);
@@ -1276,9 +1259,9 @@ static int eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
-	mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command);
+	mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);
 
-	return tx_bytes;
+	mp->tx_desc_count += nr_frags + 1;
 }
 
 /**
@@ -1306,7 +1289,8 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	stats->tx_bytes = eth_tx_submit_descs_for_skb(mp, skb);
+	eth_tx_submit_descs_for_skb(mp, skb);
+	stats->tx_bytes = skb->len;
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
@@ -1893,7 +1877,7 @@ static void eth_port_start(struct net_device *dev)
 			MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/* Enable port Rx. */
-	mv643xx_eth_port_enable_rx(port_num, mp->port_rx_queue_command);
+	mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
 
 	/* Disable port bandwidth limits by clearing MTU register */
 	mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
@@ -2602,79 +2586,6 @@ static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location,
 }
 
 /*
- * eth_tx_return_desc - Free all used Tx descriptors
- *
- * DESCRIPTION:
- *	This routine returns the transmitted packet information to the caller.
- *	It uses the 'first' index to support Tx desc return in case a transmit
- *	of a packet spanned over multiple buffer still in process.
- *	In case the Tx queue was in "resource error" condition, where there are
- *	no available Tx resources, the function resets the resource error flag.
- *
- * INPUT:
- *	struct mv643xx_private *mp Ethernet Port Control srtuct.
- *	struct pkt_info *p_pkt_info User packet buffer.
- *
- * OUTPUT:
- *	Tx ring 'first' and 'used' indexes are updated.
- *
- * RETURN:
- *	ETH_OK on success
- *	ETH_ERROR otherwise.
- *
- */
-static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
-						struct pkt_info *p_pkt_info)
-{
-	int tx_desc_used;
-	struct eth_tx_desc *p_tx_desc_used;
-	unsigned int command_status;
-	unsigned long flags;
-	int err = ETH_OK;
-
-	spin_lock_irqsave(&mp->lock, flags);
-
-	BUG_ON(mp->tx_desc_count < 0);
-	if (mp->tx_desc_count == 0) {
-		/* no more tx descs in use */
-		err = ETH_ERROR;
-		goto out;
-	}
-
-	/* Get the Tx Desc ring indexes */
-	tx_desc_used = mp->tx_used_desc_q;
-
-	p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
-
-	BUG_ON(p_tx_desc_used == NULL);
-
-	command_status = p_tx_desc_used->cmd_sts;
-	if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
-		/* Still transmitting... */
-		err = ETH_ERROR;
-		goto out;
-	}
-
-	/* Pass the packet information to the caller */
-	p_pkt_info->cmd_sts = command_status;
-	p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
-	p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
-	p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
-	mp->tx_skb[tx_desc_used] = NULL;
-
-	/* Update the next descriptor to release. */
-	mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;
-
-	BUG_ON(mp->tx_desc_count == 0);
-	mp->tx_desc_count--;
-
-out:
-	spin_unlock_irqrestore(&mp->lock, flags);
-
-	return err;
-}
-
-/*
  * eth_port_receive - Get received information from Rx ring.
  *
  * DESCRIPTION:
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 8768e1ba45df..cade2705423c 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -321,8 +321,6 @@ struct mv643xx_mib_counters {
 
 struct mv643xx_private {
 	int port_num; /* User Ethernet port number */
-	u32 port_tx_queue_command; /* Port active Tx queues summary*/
-	u32 port_rx_queue_command; /* Port active Rx queues summary*/
 
 	u32 rx_sram_addr; /* Base address of rx sram area */
 	u32 rx_sram_size; /* Size of rx sram area */
@@ -411,8 +409,6 @@ static void eth_port_read_smi_reg(unsigned int eth_port_num,
 static void eth_clear_mib_counters(unsigned int eth_port_num);
 
 /* Port data flow control routines */
-static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
-						struct pkt_info *p_pkt_info);
 static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
 						struct pkt_info *p_pkt_info);
 static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,