Diffstat (limited to 'drivers/net/ethernet/cadence/macb.c')
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 188
1 file changed, 130 insertions, 58 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
 #define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE	64
 #define MAX_RX_RING_SIZE	8192
-#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE	64
 #define MAX_TX_RING_SIZE	4096
-#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc)	\
+#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
 				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
  */
 #define MACB_HALT_TIMEOUT	1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+	return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* Dma buffer descriptor is 4 words length (instead of 2 words)
+	 * for 64b GEM.
+	 */
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		idx <<= 1;
+#endif
+	return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+	index = macb_tx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 	dma_addr_t offset;
 
 	offset = macb_tx_ring_wrap(queue->bp, index) *
-		 sizeof(struct macb_dma_desc);
+		 macb_dma_desc_get_size(queue->bp);
 
 	return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+	index = macb_rx_ring_wrap(bp, index);
+	index = macb_adj_dma_desc_idx(bp, index);
+	return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
 	}
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-	desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	desc->addrh = (u32)(addr >> 32);
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		desc_64->addrh = upper_32_bits(addr);
+	}
 #endif
+	desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+	dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	struct macb_dma_desc_64 *desc_64;
+
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+		desc_64 = macb_64b_desc(bp, desc);
+		addr = ((u64)(desc_64->addrh) << 32);
+	}
+#endif
+	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+	return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
-	macb_set_addr(desc, 0);
+	macb_set_addr(bp, desc, 0);
 	desc->ctrl = MACB_BIT(TX_USED);
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
 	/* Reinitialize the TX desc queue */
-	queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 	/* Make TX ring reflect state of hardware */
 	queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
 	unsigned int entry;
 	struct sk_buff *skb;
 	dma_addr_t paddr;
+	struct macb_dma_desc *desc;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
 			  bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
 		rmb();
 
 		bp->rx_prepared_head++;
+		desc = macb_rx_desc(bp, entry);
 
 		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
-			macb_set_addr(&(bp->rx_ring[entry]), paddr);
-			bp->rx_ring[entry].ctrl = 0;
+			macb_set_addr(bp, desc, paddr);
+			desc->ctrl = 0;
 
 			/* properly align Ethernet header */
 			skb_reserve(skb, NET_IP_ALIGN);
 		} else {
-			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-			bp->rx_ring[entry].ctrl = 0;
+			desc->addr &= ~MACB_BIT(RX_USED);
+			desc->ctrl = 0;
 		}
 	}
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
 		bool rxused;
 
 		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = &bp->rx_ring[entry];
+		desc = macb_rx_desc(bp, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
 		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		addr = macb_get_addr(bp, desc);
 		ctrl = desc->ctrl;
 
 		if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
 	dma_addr_t addr;
+	struct macb_dma_desc *desc = NULL;
 	int i;
 
 	addr = bp->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(bp, i);
+		macb_set_addr(bp, desc, addr);
+		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 	bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
 	for (tail = bp->rx_tail; budget > 0; tail++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-		u32 addr, ctrl;
+		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		addr = desc->addr;
 		ctrl = desc->ctrl;
 
-		if (!(addr & MACB_BIT(RX_USED)))
+		if (!(desc->addr & MACB_BIT(RX_USED)))
 			break;
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	i = tx_head;
 	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
-	desc = &queue->tx_ring[entry];
+	desc = macb_tx_desc(queue, entry);
 	desc->ctrl = ctrl;
 
 	if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		i--;
 		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
-		desc = &queue->tx_ring[entry];
+		desc = macb_tx_desc(queue, entry);
 
 		ctrl = (u32)tx_skb->size;
 		if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
 		/* Set TX buffer descriptor */
-		macb_set_addr(desc, tx_skb->mapping);
+		macb_set_addr(bp, desc, tx_skb->mapping);
 		/* desc->addr must be visible to hardware before clearing
 		 * 'TX_USED' bit in desc->ctrl.
 		 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
 		if (!skb)
 			continue;
 
-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);
+
 		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
 	unsigned int q;
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
+	struct macb_dma_desc *desc = NULL;
 
 	macb_init_rx_ring(bp);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
 			dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		dmacfg |= GEM_BIT(ADDR64);
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			dmacfg |= GEM_BIT(ADDR64);
 #endif
 		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
 			   dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
 		/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
 			queue->IMR = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
 		}
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
 	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
 					 &lp->rx_ring_dma, GFP_KERNEL);
 	if (!lp->rx_ring)
 		return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
 	if (!lp->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
 				  lp->rx_ring, lp->rx_ring_dma);
 		lp->rx_ring = NULL;
 		return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
 	addr = lp->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
 	}
 
 	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
 	lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
 			  lp->rx_ring, lp->rx_ring_dma);
 	lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
 		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
 			skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
 			lp->stats.rx_dropped++;
 		}
 
-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
 			lp->stats.multicast++;
 
 		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
 		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
 			lp->rx_tail = 0;
 		else
 			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
 	}
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
 	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
-		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
+		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
 	spin_lock_init(&bp->lock);
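
For readers following the patch, below is a reduced, self-contained sketch of the descriptor scheme it introduces: on 64-bit-capable GEM, each ring entry grows from two words to four, entry indices are doubled to land on the right slot, and buffer addresses are split across the low word and the trailing addrh word. The actual struct macb_dma_desc, struct macb_dma_desc_64, struct macb and the HW_DMA_CAP_* values come from the companion macb.h change that this page does not show, so the definitions here are assumptions for illustration only; the helpers mirror the added functions, with the kernel's upper_32_bits()/lower_32_bits() replaced by plain shifts and casts.

/* Hypothetical, reduced model of the patch's descriptor handling. */
#include <stdint.h>
#include <stdio.h>

struct macb_dma_desc {		/* base 2-word descriptor (32-bit GEM) */
	uint32_t addr;
	uint32_t ctrl;
};

struct macb_dma_desc_64 {	/* 2 extra words appended for 64-bit GEM */
	uint32_t addrh;		/* upper 32 bits of the buffer address */
	uint32_t resvd;
};

enum { HW_DMA_CAP_32B, HW_DMA_CAP_64B };	/* assumed, from macb.h */

struct macb { int hw_dma_cap; };		/* reduced stand-in */

/* Mirrors macb_dma_desc_get_size(): entries double in size on 64b HW */
static unsigned int desc_size(const struct macb *bp)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
		return sizeof(struct macb_dma_desc) +
		       sizeof(struct macb_dma_desc_64);
	return sizeof(struct macb_dma_desc);
}

/* Mirrors macb_adj_dma_desc_idx(): entry n of a 4-word ring starts at
 * slot 2*n of the underlying 2-word array, hence the doubling. */
static unsigned int adj_idx(const struct macb *bp, unsigned int idx)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
		idx <<= 1;
	return idx;
}

/* Mirrors macb_set_addr(): the low word is always written; the high
 * word only on 64b hardware, in the trailer right after the base words. */
static void set_addr(const struct macb *bp, struct macb_dma_desc *desc,
		     uint64_t addr)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
		struct macb_dma_desc_64 *desc_64 =
			(struct macb_dma_desc_64 *)(desc + 1);
		desc_64->addrh = (uint32_t)(addr >> 32);  /* upper_32_bits() */
	}
	desc->addr = (uint32_t)addr;			  /* lower_32_bits() */
}

int main(void)
{
	struct macb bp = { .hw_dma_cap = HW_DMA_CAP_64B };
	uint32_t ring[8 * 4] = { 0 };		/* 8 entries, 4 words each */
	uint64_t paddr = 0x9ffc0001000ULL;	/* sample 44-bit bus address */

	unsigned int slot = adj_idx(&bp, 3);	/* entry 3 -> array slot 6 */
	struct macb_dma_desc *desc = (struct macb_dma_desc *)ring + slot;

	set_addr(&bp, desc, paddr);
	printf("desc bytes=%u slot=%u addr=0x%08x addrh=0x%08x\n",
	       desc_size(&bp), slot, desc->addr,
	       ((struct macb_dma_desc_64 *)(desc + 1))->addrh);
	return 0;
}

This is the same doubling that macb_tx_desc()/macb_rx_desc() rely on after macb_adj_dma_desc_idx() in the patch: software keeps indexing by entry number while the hardware walks 4-word records, which is also why RX_RING_BYTES()/TX_RING_BYTES() switch from sizeof(struct macb_dma_desc) to macb_dma_desc_get_size().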