 drivers/net/mv643xx_eth.c | 156 ++++++++++++++++++++++----------------------
 1 file changed, 78 insertions(+), 78 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index f23e82c84aee..ac7fc7a53678 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -323,21 +323,21 @@ struct mv643xx_eth_private {
 	/* Tx/Rx rings managment indexes fields. For driver use */
 
 	/* Next available and first returning Rx resource */
-	int rx_curr_desc_q, rx_used_desc_q;
+	int rx_curr_desc, rx_used_desc;
 
 	/* Next available and first returning Tx resource */
-	int tx_curr_desc_q, tx_used_desc_q;
+	int tx_curr_desc, tx_used_desc;
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	u32 tx_clean_threshold;
 #endif
 
-	struct rx_desc *p_rx_desc_area;
+	struct rx_desc *rx_desc_area;
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
 
-	struct tx_desc *p_tx_desc_area;
+	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 	struct sk_buff **tx_skb;
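The renamed index fields implement the usual producer/consumer convention for a DMA descriptor ring: *_curr_desc is where the driver claims the next descriptor, *_used_desc is the oldest entry still outstanding, and both wrap modulo the ring size. A minimal sketch of that wraparound arithmetic, matching the expressions used throughout this patch (standalone illustration, not driver code):

static int ring_advance(int index, int ring_size)
{
	/* step one slot forward, wrapping back to 0 at the end */
	return (index + 1) % ring_size;
}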
@@ -444,31 +444,31 @@ static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);
 
 static FUNC_RET_STATUS rx_return_buff(struct mv643xx_eth_private *mp,
-						struct pkt_info *p_pkt_info)
+						struct pkt_info *pkt_info)
 {
 	int used_rx_desc;	/* Where to return Rx resource */
-	volatile struct rx_desc *p_used_rx_desc;
+	volatile struct rx_desc *rx_desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mp->lock, flags);
 
 	/* Get 'used' Rx descriptor */
-	used_rx_desc = mp->rx_used_desc_q;
-	p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
+	used_rx_desc = mp->rx_used_desc;
+	rx_desc = &mp->rx_desc_area[used_rx_desc];
 
-	p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
-	p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
-	mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
+	rx_desc->buf_ptr = pkt_info->buf_ptr;
+	rx_desc->buf_size = pkt_info->byte_cnt;
+	mp->rx_skb[used_rx_desc] = pkt_info->return_info;
 
 	/* Flush the write pipe */
 
 	/* Return the descriptor to DMA ownership */
 	wmb();
-	p_used_rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
+	rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 	wmb();
 
 	/* Move the used descriptor pointer to the next descriptor */
-	mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;
+	mp->rx_used_desc = (used_rx_desc + 1) % mp->rx_ring_size;
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
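The wmb() pair around the cmd_sts store is the heart of rx_return_buff(): the descriptor payload (buf_ptr, buf_size, the skb bookkeeping) must be globally visible before the ownership bit flips, because the controller may fetch the descriptor the moment it sees BUFFER_OWNED_BY_DMA. A condensed sketch of the handoff, assuming a descriptor with the same field names as the driver's struct rx_desc (illustrative kernel-context code, not the driver itself):

static void hand_desc_to_dma(volatile struct rx_desc *desc,
			     u32 buf_dma, u16 size)
{
	desc->buf_ptr = buf_dma;	/* publish the payload fields... */
	desc->buf_size = size;
	wmb();				/* ...before flipping ownership */
	desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
	wmb();				/* ownership visible before later writes */
}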
@@ -519,23 +519,23 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
 }
 
 static FUNC_RET_STATUS port_receive(struct mv643xx_eth_private *mp,
-						struct pkt_info *p_pkt_info)
+						struct pkt_info *pkt_info)
 {
 	int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
-	volatile struct rx_desc *p_rx_desc;
+	volatile struct rx_desc *rx_desc;
 	unsigned int command_status;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mp->lock, flags);
 
 	/* Get the Rx Desc ring 'curr and 'used' indexes */
-	rx_curr_desc = mp->rx_curr_desc_q;
-	rx_used_desc = mp->rx_used_desc_q;
+	rx_curr_desc = mp->rx_curr_desc;
+	rx_used_desc = mp->rx_used_desc;
 
-	p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
+	rx_desc = &mp->rx_desc_area[rx_curr_desc];
 
 	/* The following parameters are used to save readings from memory */
-	command_status = p_rx_desc->cmd_sts;
+	command_status = rx_desc->cmd_sts;
 	rmb();
 
 	/* Nothing to receive... */
@@ -544,11 +544,11 @@ static FUNC_RET_STATUS port_receive(struct mv643xx_eth_private *mp,
 		return ETH_END_OF_JOB;
 	}
 
-	p_pkt_info->byte_cnt = p_rx_desc->byte_cnt - ETH_HW_IP_ALIGN;
-	p_pkt_info->cmd_sts = command_status;
-	p_pkt_info->buf_ptr = p_rx_desc->buf_ptr + ETH_HW_IP_ALIGN;
-	p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
-	p_pkt_info->l4i_chk = p_rx_desc->buf_size;
+	pkt_info->byte_cnt = rx_desc->byte_cnt - ETH_HW_IP_ALIGN;
+	pkt_info->cmd_sts = command_status;
+	pkt_info->buf_ptr = rx_desc->buf_ptr + ETH_HW_IP_ALIGN;
+	pkt_info->return_info = mp->rx_skb[rx_curr_desc];
+	pkt_info->l4i_chk = rx_desc->buf_size;
 
 	/*
 	 * Clean the return info field to indicate that the
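The ETH_HW_IP_ALIGN fixup reflects how the receive buffers are posted: each frame is deposited two bytes into the buffer so that the IP header following the 14-byte Ethernet header lands word-aligned, and port_receive() compensates by shifting the reported DMA address and byte count by the same two bytes. A quick arithmetic check (hypothetical standalone snippet; constants redefined locally):

#include <assert.h>

int main(void)
{
	const int eth_hlen = 14;	/* Ethernet header length */
	const int hw_ip_align = 2;	/* ETH_HW_IP_ALIGN in the driver */
	/* header offset plus header length is a multiple of 4 (and 16) */
	assert((hw_ip_align + eth_hlen) % 4 == 0);
	return 0;
}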
@@ -558,7 +558,7 @@ static FUNC_RET_STATUS port_receive(struct mv643xx_eth_private *mp,
 
 	/* Update current index in data structure */
 	rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
-	mp->rx_curr_desc_q = rx_next_curr_desc;
+	mp->rx_curr_desc = rx_next_curr_desc;
 
 	spin_unlock_irqrestore(&mp->lock, flags);
 
@@ -650,7 +650,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	work_done = 0;
 	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
-					!= (u32) mp->rx_used_desc_q)
+					!= (u32) mp->rx_used_desc)
 		work_done = mv643xx_eth_receive_queue(dev, budget);
 
 	if (work_done < budget) {
@@ -685,10 +685,10 @@ static int alloc_tx_desc_index(struct mv643xx_eth_private *mp)
 
 	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
 
-	tx_desc_curr = mp->tx_curr_desc_q;
-	mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
+	tx_desc_curr = mp->tx_curr_desc;
+	mp->tx_curr_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
 
-	BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
+	BUG_ON(mp->tx_curr_desc == mp->tx_used_desc);
 
 	return tx_desc_curr;
 }
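Because curr == used describes both a completely full and a completely empty ring, alloc_tx_desc_index() cannot rely on the index pair alone: the separate tx_desc_count is checked before the advance, and the BUG_ON afterwards asserts that the producer never laps the consumer. The same invariant, restated as a hypothetical helper (single producer under mp->lock assumed, as in the driver):

static int tx_ring_full(int tx_desc_count, int tx_ring_size)
{
	/* the count, not the index pair, disambiguates full vs. empty */
	return tx_desc_count >= tx_ring_size;
}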
@@ -704,7 +704,7 @@ static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
 		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 
 		tx_index = alloc_tx_desc_index(mp);
-		desc = &mp->p_tx_desc_area[tx_index];
+		desc = &mp->tx_desc_area[tx_index];
 
 		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
 		/* Last Frag enables interrupt and frees the skb */
@@ -716,7 +716,7 @@ static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
 		} else
 			mp->tx_skb[tx_index] = NULL;
 
-		desc = &mp->p_tx_desc_area[tx_index];
+		desc = &mp->tx_desc_area[tx_index];
 		desc->l4i_chk = 0;
 		desc->byte_cnt = this_frag->size;
 		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
@@ -743,7 +743,7 @@ static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
 	tx_index = alloc_tx_desc_index(mp);
-	desc = &mp->p_tx_desc_area[tx_index];
+	desc = &mp->tx_desc_area[tx_index];
 
 	if (nr_frags) {
 		tx_fill_frag_descs(mp, skb);
@@ -1113,7 +1113,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
 
 
 /* address handling *********************************************************/
-static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *p_addr)
+static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
@@ -1122,12 +1122,12 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *p_addr)
 	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
 	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
 
-	p_addr[0] = (mac_h >> 24) & 0xff;
-	p_addr[1] = (mac_h >> 16) & 0xff;
-	p_addr[2] = (mac_h >> 8) & 0xff;
-	p_addr[3] = mac_h & 0xff;
-	p_addr[4] = (mac_l >> 8) & 0xff;
-	p_addr[5] = mac_l & 0xff;
+	addr[0] = (mac_h >> 24) & 0xff;
+	addr[1] = (mac_h >> 16) & 0xff;
+	addr[2] = (mac_h >> 8) & 0xff;
+	addr[3] = mac_h & 0xff;
+	addr[4] = (mac_l >> 8) & 0xff;
+	addr[5] = mac_l & 0xff;
 }
 
 static void init_mac_tables(struct mv643xx_eth_private *mp)
@@ -1163,23 +1163,23 @@ static void set_filter_table_entry(struct mv643xx_eth_private *mp,
 	wrl(mp, table + tbl_offset, table_reg);
 }
 
-static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *p_addr)
+static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 	int table;
 
-	mac_l = (p_addr[4] << 8) | (p_addr[5]);
-	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
-							(p_addr[3] << 0);
+	mac_l = (addr[4] << 8) | (addr[5]);
+	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+							(addr[3] << 0);
 
 	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
 	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
 
 	/* Accept frames with this address */
 	table = UNICAST_TABLE(port_num);
-	set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
+	set_filter_table_entry(mp, table, addr[5] & 0x0f);
 }
 
 static void mv643xx_eth_update_mac_address(struct net_device *dev)
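uc_addr_get() and uc_addr_set() are exact inverses: the high MAC register carries address bytes 0-3 and the low register bytes 4-5. A standalone round-trip check of that packing (compilable outside the kernel; register I/O replaced by local variables, sample address arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
	uint32_t mac_h = ((uint32_t)addr[0] << 24) | (addr[1] << 16) |
			 (addr[2] << 8) | addr[3];
	uint32_t mac_l = (addr[4] << 8) | addr[5];
	uint8_t out[6] = {
		(mac_h >> 24) & 0xff, (mac_h >> 16) & 0xff,
		(mac_h >> 8) & 0xff, mac_h & 0xff,
		(mac_l >> 8) & 0xff, mac_l & 0xff,
	};
	for (int i = 0; i < 6; i++)
		assert(out[i] == addr[i]);	/* unpack(pack(x)) == x */
	return 0;
}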
@@ -1201,7 +1201,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
 	return 0;
 }
 
-static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *p_addr)
+static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *addr)
 {
 	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
@@ -1212,17 +1212,17 @@ static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *p_addr)
 	int crc[8];
 	int i;
 
-	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
-	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
+	if ((addr[0] == 0x01) && (addr[1] == 0x00) &&
+	    (addr[2] == 0x5E) && (addr[3] == 0x00) && (addr[4] == 0x00)) {
 		table = SPECIAL_MCAST_TABLE(port_num);
-		set_filter_table_entry(mp, table, p_addr[5]);
+		set_filter_table_entry(mp, table, addr[5]);
 		return;
 	}
 
 	/* Calculate CRC-8 out of the given address */
-	mac_h = (p_addr[0] << 8) | (p_addr[1]);
-	mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
-			(p_addr[4] << 8) | (p_addr[5] << 0);
+	mac_h = (addr[0] << 8) | (addr[1]);
+	mac_l = (addr[2] << 24) | (addr[3] << 16) |
+			(addr[4] << 8) | (addr[5] << 0);
 
 	for (i = 0; i < 32; i++)
 		mac_array[i] = (mac_l >> i) & 0x1;
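The early-return branch matches the 01:00:5e:00:00:xx prefix, i.e. the Ethernet mapping of the 224.0.0.0/24 IPv4 multicast block; those 256 well-known groups go into the special multicast table, indexed directly by the final address byte, while all other multicast addresses fall through to the CRC-8 hash below. The test, restated standalone (hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

static bool is_special_mcast(const uint8_t addr[6])
{
	/* 01:00:5e:00:00:xx covers IPv4 multicast 224.0.0.0/24 */
	return addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e &&
	       addr[3] == 0x00 && addr[4] == 0x00;
}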
@@ -1372,15 +1372,15 @@ static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
 	int i;
 
 	/* initialize the next_desc_ptr links in the Rx descriptors ring */
-	p_rx_desc = (struct rx_desc *)mp->p_rx_desc_area;
+	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
 	for (i = 0; i < rx_desc_num; i++) {
 		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
 			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
 	}
 
 	/* Save Rx desc pointer to driver struct. */
-	mp->rx_curr_desc_q = 0;
-	mp->rx_used_desc_q = 0;
+	mp->rx_curr_desc = 0;
+	mp->rx_used_desc = 0;
 
 	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
 }
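ether_init_rx_desc_ring() turns the flat descriptor array into a hardware-visible circular list: entry i stores the bus address of entry (i + 1) mod n, so the controller's descriptor fetch wraps around without CPU involvement. The link arithmetic, restated standalone (hypothetical names):

#include <stddef.h>
#include <stdint.h>

static uint32_t next_desc_bus_addr(uint32_t ring_dma, int i, int n,
				   size_t desc_size)
{
	/* bus address of the (i + 1)-th descriptor, wrapping at n */
	return ring_dma + (uint32_t)(((i + 1) % n) * desc_size);
}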
@@ -1408,10 +1408,10 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
 							mp->rx_desc_count);
 	/* Free RX ring */
 	if (mp->rx_sram_size)
-		iounmap(mp->p_rx_desc_area);
+		iounmap(mp->rx_desc_area);
 	else
 		dma_free_coherent(NULL, mp->rx_desc_area_size,
-				mp->p_rx_desc_area, mp->rx_desc_dma);
+				mp->rx_desc_area, mp->rx_desc_dma);
 }
 
 static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
@@ -1421,14 +1421,14 @@ static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
 	int i;
 
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
-	p_tx_desc = (struct tx_desc *)mp->p_tx_desc_area;
+	p_tx_desc = (struct tx_desc *)mp->tx_desc_area;
 	for (i = 0; i < tx_desc_num; i++) {
 		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
 			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
 	}
 
-	mp->tx_curr_desc_q = 0;
-	mp->tx_used_desc_q = 0;
+	mp->tx_curr_desc = 0;
+	mp->tx_used_desc = 0;
 
 	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
 }
@@ -1454,8 +1454,8 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 		return released;
 	}
 
-	tx_index = mp->tx_used_desc_q;
-	desc = &mp->p_tx_desc_area[tx_index];
+	tx_index = mp->tx_used_desc;
+	desc = &mp->tx_desc_area[tx_index];
 	cmd_sts = desc->cmd_sts;
 
 	if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
@@ -1463,7 +1463,7 @@ static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
 		return released;
 	}
 
-	mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
+	mp->tx_used_desc = (tx_index + 1) % mp->tx_ring_size;
 	mp->tx_desc_count--;
 
 	addr = desc->buf_ptr;
@@ -1517,14 +1517,14 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
 	/* Free outstanding skb's on TX ring */
 	mv643xx_eth_free_all_tx_descs(dev);
 
-	BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
+	BUG_ON(mp->tx_used_desc != mp->tx_curr_desc);
 
 	/* Free TX ring */
 	if (mp->tx_sram_size)
-		iounmap(mp->p_tx_desc_area);
+		iounmap(mp->tx_desc_area);
 	else
 		dma_free_coherent(NULL, mp->tx_desc_area_size,
-				mp->p_tx_desc_area, mp->tx_desc_dma);
+				mp->tx_desc_area, mp->tx_desc_dma);
 }
 
 
@@ -1663,12 +1663,12 @@ static void port_start(struct net_device *dev)
 	struct ethtool_cmd ethtool_cmd;
 
 	/* Assignment of Tx CTRP of given queue */
-	tx_curr_desc = mp->tx_curr_desc_q;
+	tx_curr_desc = mp->tx_curr_desc;
 	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
 		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
 	/* Assignment of Rx CRDP of given queue */
-	rx_curr_desc = mp->rx_curr_desc_q;
+	rx_curr_desc = mp->rx_curr_desc;
 	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
 		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
@@ -1800,22 +1800,22 @@ static int mv643xx_eth_open(struct net_device *dev)
 	mp->tx_desc_area_size = size;
 
 	if (mp->tx_sram_size) {
-		mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
+		mp->tx_desc_area = ioremap(mp->tx_sram_addr,
 							mp->tx_sram_size);
 		mp->tx_desc_dma = mp->tx_sram_addr;
 	} else
-		mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
+		mp->tx_desc_area = dma_alloc_coherent(NULL, size,
 							&mp->tx_desc_dma,
 							GFP_KERNEL);
 
-	if (!mp->p_tx_desc_area) {
+	if (!mp->tx_desc_area) {
 		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
 							dev->name, size);
 		err = -ENOMEM;
 		goto out_free_tx_skb;
 	}
-	BUG_ON((u32) mp->p_tx_desc_area & 0xf);	/* check 16-byte alignment */
-	memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
+	BUG_ON((u32) mp->tx_desc_area & 0xf);	/* check 16-byte alignment */
+	memset((void *)mp->tx_desc_area, 0, mp->tx_desc_area_size);
 
 	ether_init_tx_desc_ring(mp);
 
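The BUG_ON enforces the controller's requirement that descriptor rings start on a 16-byte boundary; both allocation paths (on-chip SRAM via ioremap(), or dma_alloc_coherent(), which returns suitably aligned memory) are expected to satisfy it, so a trip here indicates a platform bug rather than a runtime condition. The check, restated standalone:

#include <assert.h>
#include <stdint.h>

static void assert_ring_aligned(const void *ring_base)
{
	/* low four address bits must be clear: 16-byte alignment */
	assert(((uintptr_t)ring_base & 0xf) == 0);
}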
@@ -1825,28 +1825,28 @@ static int mv643xx_eth_open(struct net_device *dev)
 	mp->rx_desc_area_size = size;
 
 	if (mp->rx_sram_size) {
-		mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
+		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
 							mp->rx_sram_size);
 		mp->rx_desc_dma = mp->rx_sram_addr;
 	} else
-		mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
+		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
 							&mp->rx_desc_dma,
 							GFP_KERNEL);
 
-	if (!mp->p_rx_desc_area) {
+	if (!mp->rx_desc_area) {
 		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
 							dev->name, size);
 		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
 							dev->name);
 		if (mp->rx_sram_size)
-			iounmap(mp->p_tx_desc_area);
+			iounmap(mp->tx_desc_area);
 		else
 			dma_free_coherent(NULL, mp->tx_desc_area_size,
-					mp->p_tx_desc_area, mp->tx_desc_dma);
+					mp->tx_desc_area, mp->tx_desc_dma);
 		err = -ENOMEM;
 		goto out_free_tx_skb;
 	}
-	memset((void *)mp->p_rx_desc_area, 0, size);
+	memset((void *)mp->rx_desc_area, 0, size);
 
 	ether_init_rx_desc_ring(mp);
 