Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
 drivers/net/ethernet/freescale/fec_main.c | 133 ++++++++++++-----------
 1 file changed, 70 insertions(+), 63 deletions(-)
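
All of the fec16_to_cpu()/fec32_to_cpu()/cpu_to_fec16()/cpu_to_fec32() helpers used below are defined in fec.h, which the path filter above excludes from this diffstat. As a minimal sketch of what such helpers look like, assuming the descriptor rings are little-endian when the FEC sits in an ARM SoC and big-endian on the ColdFire/PowerPC parts (the exact fec.h contents are not shown here):

	/* Assumed fec.h counterpart (sketch, not part of this diff):
	 * the conversions compile away on hosts whose byte order already
	 * matches the descriptor format and become byte swaps otherwise.
	 */
	#if defined(CONFIG_ARM)
	#define fec32_to_cpu le32_to_cpu
	#define fec16_to_cpu le16_to_cpu
	#define cpu_to_fec32 cpu_to_le32
	#define cpu_to_fec16 cpu_to_le16
	#define __fec32 __le32
	#define __fec16 __le16
	#else
	#define fec32_to_cpu be32_to_cpu
	#define fec16_to_cpu be16_to_cpu
	#define cpu_to_fec32 cpu_to_be32
	#define cpu_to_fec16 cpu_to_be16
	#define __fec32 __be32
	#define __fec16 __be16
	#endif
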
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 502da6f48f95..41c81f6ec630 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
 	bdp = txq->tx_bd_base;
 
 	do {
-		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
 			index,
 			bdp == txq->cur_tx ? 'S' : ' ',
 			bdp == txq->dirty_tx ? 'H' : ' ',
-			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+			fec16_to_cpu(bdp->cbd_sc),
+			fec32_to_cpu(bdp->cbd_bufaddr),
+			fec16_to_cpu(bdp->cbd_datlen),
 			txq->tx_skbuff[index]);
 		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
 		index++;
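
The pr_info() format tightens from %08lx to %08x because cbd_bufaddr is no longer a bare unsigned long once it carries a device-endian value: fec32_to_cpu() returns a plain u32. Under the same assumption as the helper sketch above, the descriptor fields would be declared with endian-annotated types so sparse can flag any raw access, roughly (field order schematic; the real header is outside this diff):

	struct bufdesc {
		__fec16 cbd_sc;		/* control/status, device byte order */
		__fec16 cbd_datlen;	/* payload length, device byte order */
		__fec32 cbd_bufaddr;	/* DMA buffer address, device byte order */
	};
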
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
 		ebdp = (struct bufdesc_ex *)bdp;
 
-		status = bdp->cbd_sc;
+		status = fec16_to_cpu(bdp->cbd_sc);
 		status &= ~BD_ENET_TX_STATS;
 		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 		frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			if (skb->ip_summed == CHECKSUM_PARTIAL)
 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 			ebdp->cbd_bdu = 0;
-			ebdp->cbd_esc = estatus;
+			ebdp->cbd_esc = cpu_to_fec32(estatus);
 		}
 
 		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			goto dma_mapping_error;
 		}
 
-		bdp->cbd_bufaddr = addr;
-		bdp->cbd_datlen = frag_len;
-		bdp->cbd_sc = status;
+		bdp->cbd_bufaddr = cpu_to_fec32(addr);
+		bdp->cbd_datlen = cpu_to_fec16(frag_len);
+		bdp->cbd_sc = cpu_to_fec16(status);
 	}
 
 	return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
 	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
 		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-				bdp->cbd_datlen, DMA_TO_DEVICE);
+		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
 	}
 	return ERR_PTR(-ENOMEM);
 }
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	/* Fill in a Tx ring entry */
 	bdp = txq->cur_tx;
 	last_bdp = bdp;
-	status = bdp->cbd_sc;
+	status = fec16_to_cpu(bdp->cbd_sc);
 	status &= ~BD_ENET_TX_STATS;
 
 	/* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 
 		ebdp->cbd_bdu = 0;
-		ebdp->cbd_esc = estatus;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
 	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
 	txq->tx_skbuff[index] = skb;
 
-	bdp->cbd_datlen = buflen;
-	bdp->cbd_bufaddr = addr;
+	bdp->cbd_datlen = cpu_to_fec16(buflen);
+	bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
 	/* Send it on its way. Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
 	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-	bdp->cbd_sc = status;
+	bdp->cbd_sc = cpu_to_fec16(status);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
 	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 	unsigned int estatus = 0;
 	dma_addr_t addr;
 
-	status = bdp->cbd_sc;
+	status = fec16_to_cpu(bdp->cbd_sc);
 	status &= ~BD_ENET_TX_STATS;
 
 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	bdp->cbd_datlen = size;
-	bdp->cbd_bufaddr = addr;
+	bdp->cbd_datlen = cpu_to_fec16(size);
+	bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
 	if (fep->bufdesc_ex) {
 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 		ebdp->cbd_bdu = 0;
-		ebdp->cbd_esc = estatus;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
 	/* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 	if (is_last) {
 		status |= BD_ENET_TX_INTR;
 		if (fep->bufdesc_ex)
-			ebdp->cbd_esc |= BD_ENET_TX_INT;
+			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
 	}
 
-	bdp->cbd_sc = status;
+	bdp->cbd_sc = cpu_to_fec16(status);
 
 	return 0;
 }
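
One subtlety in the hunk above: ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT) ORs a converted constant into the stored value instead of converting the field to CPU order, ORing, and converting back. That is valid because byte swapping distributes over bitwise OR. A small standalone check (plain C, names local to this example, not driver code):

	#include <assert.h>
	#include <stdint.h>

	/* swab32: byte-reverse a 32-bit value, the same effect a
	 * cpu_to_fec32/fec32_to_cpu pair has on a mismatched-endian host.
	 */
	static uint32_t swab32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0xff00) |
		       ((x << 8) & 0xff0000) | (x << 24);
	}

	int main(void)
	{
		uint32_t esc = 0x00c00000;	/* bits already stored in the field */
		uint32_t bit = 0x40000000;	/* flag being ORed in */

		/* OR of swapped values equals the swap of the OR, so the
		 * read-modify-write needs no explicit round trip.
		 */
		assert((swab32(esc) | swab32(bit)) == swab32(esc | bit));
		return 0;
	}
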
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 	unsigned short status;
 	unsigned int estatus = 0;
 
-	status = bdp->cbd_sc;
+	status = fec16_to_cpu(bdp->cbd_sc);
 	status &= ~BD_ENET_TX_STATS;
 	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 		}
 	}
 
-	bdp->cbd_bufaddr = dmabuf;
-	bdp->cbd_datlen = hdr_len;
+	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
 
 	if (fep->bufdesc_ex) {
 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 		ebdp->cbd_bdu = 0;
-		ebdp->cbd_esc = estatus;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	bdp->cbd_sc = status;
+	bdp->cbd_sc = cpu_to_fec16(status);
 
 	return 0;
 }
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 			/* Initialize the BD for every fragment in the page. */
 			if (bdp->cbd_bufaddr)
-				bdp->cbd_sc = BD_ENET_RX_EMPTY;
+				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 			else
-				bdp->cbd_sc = 0;
+				bdp->cbd_sc = cpu_to_fec16(0);
 			bdp = fec_enet_get_nextdesc(bdp, fep, q);
 		}
 
 		/* Set the last buffer to wrap */
 		bdp = fec_enet_get_prevdesc(bdp, fep, q);
-		bdp->cbd_sc |= BD_SC_WRAP;
+		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
 		rxq->cur_rx = rxq->rx_bd_base;
 	}
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 		for (i = 0; i < txq->tx_ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
-			bdp->cbd_sc = 0;
+			bdp->cbd_sc = cpu_to_fec16(0);
 			if (txq->tx_skbuff[i]) {
 				dev_kfree_skb_any(txq->tx_skbuff[i]);
 				txq->tx_skbuff[i] = NULL;
 			}
-			bdp->cbd_bufaddr = 0;
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, fep, q);
 		}
 
 		/* Set the last buffer to wrap */
 		bdp = fec_enet_get_prevdesc(bdp, fep, q);
-		bdp->cbd_sc |= BD_SC_WRAP;
+		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 		txq->dirty_tx = bdp;
 	}
 }
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
 	 */
 	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
 		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+		writel((__force u32)cpu_to_be32(temp_mac[0]),
+		       fep->hwp + FEC_ADDR_LOW);
+		writel((__force u32)cpu_to_be32(temp_mac[1]),
+		       fep->hwp + FEC_ADDR_HIGH);
 	}
 
 	/* Clear any outstanding interrupt. */
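
The (__force u32) casts in this hunk change nothing at runtime; they only tell sparse the type punning is intentional. cpu_to_be32() returns a __be32 while writel() takes a cpu-order u32, and big-endian really is wanted on every host here: the first MAC byte belongs in the top byte of FEC_ADDR_LOW. A standalone sketch of the equivalent open-coded store (hypothetical names, illustration only):

	#include <stdint.h>

	/* Build the FEC_ADDR_LOW value: mac[0] lands in the most
	 * significant byte regardless of host endianness, which is what
	 * (__force u32)cpu_to_be32(temp_mac[0]) achieves after the
	 * memcpy() in the hunk above.
	 */
	static uint32_t fec_addr_low(const uint8_t mac[6])
	{
		return ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
		       ((uint32_t)mac[2] << 8)  |  (uint32_t)mac[3];
	}
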
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 	while (bdp != READ_ONCE(txq->cur_tx)) {
 		/* Order the load of cur_tx and cbd_sc */
 		rmb();
-		status = READ_ONCE(bdp->cbd_sc);
+		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
 		if (status & BD_ENET_TX_READY)
 			break;
 
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		skb = txq->tx_skbuff[index];
 		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-					bdp->cbd_datlen, DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = 0;
+		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+			dma_unmap_single(&fep->pdev->dev,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
+					 fec16_to_cpu(bdp->cbd_datlen),
+					 DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = cpu_to_fec32(0);
 		if (!skb) {
 			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 			continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 			struct skb_shared_hwtstamps shhwtstamps;
 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-			fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
 			skb_tstamp_tx(skb, &shhwtstamps);
 		}
 
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 	if (off)
 		skb_reserve(skb, fep->rx_align + 1 - off);
 
-	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
-					  FEC_ENET_RX_FRSIZE - fep->rx_align,
-					  DMA_FROM_DEVICE);
-	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
 		if (net_ratelimit())
 			netdev_err(ndev, "Rx DMA memory map failed\n");
 		return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
 	if (!new_skb)
 		return false;
 
-	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+	dma_sync_single_for_cpu(&fep->pdev->dev,
+				fec32_to_cpu(bdp->cbd_bufaddr),
 				FEC_ENET_RX_FRSIZE - fep->rx_align,
 				DMA_FROM_DEVICE);
 	if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	 */
 	bdp = rxq->cur_rx;
 
-	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
 		if (pkt_received >= budget)
 			break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
 		/* Process the incoming frame. */
 		ndev->stats.rx_packets++;
-		pkt_len = bdp->cbd_datlen;
+		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
 		ndev->stats.rx_bytes += pkt_len;
 
 		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			ndev->stats.rx_dropped++;
 			goto rx_processing_done;
 		}
-		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+		dma_unmap_single(&fep->pdev->dev,
+				 fec32_to_cpu(bdp->cbd_bufaddr),
 				 FEC_ENET_RX_FRSIZE - fep->rx_align,
 				 DMA_FROM_DEVICE);
 	}
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		/* If this is a VLAN packet remove the VLAN Tag */
 		vlan_packet_rcvd = false;
 		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-		    fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+		    fep->bufdesc_ex &&
+		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
 			/* Push and remove the vlan tag */
 			struct vlan_hdr *vlan_header =
 				(struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
 		/* Get receive timestamp from the skb */
 		if (fep->hwts_rx_en && fep->bufdesc_ex)
-			fec_enet_hwtstamp(fep, ebdp->ts,
+			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
 					  skb_hwtstamps(skb));
 
 		if (fep->bufdesc_ex &&
 		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
 				/* don't check it */
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			} else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		napi_gro_receive(&fep->napi, skb);
 
 		if (is_copybreak) {
-			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+			dma_sync_single_for_device(&fep->pdev->dev,
+						   fec32_to_cpu(bdp->cbd_bufaddr),
 						   FEC_ENET_RX_FRSIZE - fep->rx_align,
 						   DMA_FROM_DEVICE);
 		} else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
 
 		/* Mark the buffer empty */
 		status |= BD_ENET_RX_EMPTY;
-		bdp->cbd_sc = status;
+		bdp->cbd_sc = cpu_to_fec16(status);
 
 		if (fep->bufdesc_ex) {
 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-			ebdp->cbd_esc = BD_ENET_RX_INT;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
 			ebdp->cbd_prot = 0;
 			ebdp->cbd_bdu = 0;
 		}
@@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 
 /* List of registers that can be safety be read to dump them with ethtool */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-	defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-	defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 static u32 fec_enet_register_offset[] = {
 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		rxq->rx_skbuff[i] = NULL;
 		if (skb) {
 			dma_unmap_single(&fep->pdev->dev,
-					 bdp->cbd_bufaddr,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
 					 FEC_ENET_RX_FRSIZE - fep->rx_align,
 					 DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
@@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 		}
 
 		rxq->rx_skbuff[i] = skb;
-		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
 		if (fep->bufdesc_ex) {
 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-			ebdp->cbd_esc = BD_ENET_RX_INT;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
 		}
 
 		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 
 	/* Set the last buffer to wrap. */
 	bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-	bdp->cbd_sc |= BD_SC_WRAP;
+	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 	return 0;
 
 err_alloc:
@@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
 		if (!txq->tx_bounce[i])
 			goto err_alloc;
 
-		bdp->cbd_sc = 0;
-		bdp->cbd_bufaddr = 0;
+		bdp->cbd_sc = cpu_to_fec16(0);
+		bdp->cbd_bufaddr = cpu_to_fec32(0);
 
 		if (fep->bufdesc_ex) {
 			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-			ebdp->cbd_esc = BD_ENET_TX_INT;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
 		}
 
 		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
 
 	/* Set the last buffer to wrap. */
 	bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-	bdp->cbd_sc |= BD_SC_WRAP;
+	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
 	return 0;
 
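
With the descriptor fields carried in bitwise-annotated types as sketched near the top, sparse can verify that no raw descriptor access survives the conversion. One way to run that check over just this file, assuming an endian-checking sparse setup (on trees of this vintage the define was still needed; later kernels enable endian checking by default):

	make C=2 CF="-D__CHECK_ENDIAN__" drivers/net/ethernet/freescale/fec_main.o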