about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net/ethernet/calxeda/xgmac.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/calxeda/xgmac.c')
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 59
1 file changed, 26 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b34d4b6..b407043ce9b0 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ 191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ 192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ 193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
194#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
194 195
195/* DMA Normal interrupt */ 196/* DMA Normal interrupt */
196#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ 197#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
@@ -210,7 +211,7 @@
210#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ 211#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
211 212
212#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ 213#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
213 DMA_INTR_ENA_TUE) 214 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
214 215
215#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ 216#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
216 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ 217 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@ struct xgmac_priv {
373 struct sk_buff **tx_skbuff; 374 struct sk_buff **tx_skbuff;
374 unsigned int tx_head; 375 unsigned int tx_head;
375 unsigned int tx_tail; 376 unsigned int tx_tail;
377 int tx_irq_cnt;
376 378
377 void __iomem *base; 379 void __iomem *base;
378 unsigned int dma_buf_sz; 380 unsigned int dma_buf_sz;
@@ -663,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
663{ 665{
664 struct xgmac_dma_desc *p; 666 struct xgmac_dma_desc *p;
665 dma_addr_t paddr; 667 dma_addr_t paddr;
668 int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
666 669
667 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { 670 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
668 int entry = priv->rx_head; 671 int entry = priv->rx_head;
@@ -671,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
671 p = priv->dma_rx + entry; 674 p = priv->dma_rx + entry;
672 675
673 if (priv->rx_skbuff[entry] == NULL) { 676 if (priv->rx_skbuff[entry] == NULL) {
674 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 677 skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
675 if (unlikely(skb == NULL)) 678 if (unlikely(skb == NULL))
676 break; 679 break;
677 680
678 priv->rx_skbuff[entry] = skb; 681 priv->rx_skbuff[entry] = skb;
679 paddr = dma_map_single(priv->device, skb->data, 682 paddr = dma_map_single(priv->device, skb->data,
680 priv->dma_buf_sz, DMA_FROM_DEVICE); 683 bufsz, DMA_FROM_DEVICE);
681 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
682 } 685 }
683 686
@@ -701,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
701 unsigned int bfsize; 704 unsigned int bfsize;
702 705
703 /* Set the Buffer size according to the MTU; 706 /* Set the Buffer size according to the MTU;
704 * indeed, in case of jumbo we need to bump-up the buffer sizes. 707 * The total buffer size including any IP offset must be a multiple
708 * of 8 bytes.
705 */ 709 */
706 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, 710 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
707 64);
708 711
709 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); 712 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
710 713
@@ -845,9 +848,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
845static void xgmac_tx_complete(struct xgmac_priv *priv) 848static void xgmac_tx_complete(struct xgmac_priv *priv)
846{ 849{
847 int i; 850 int i;
848 void __iomem *ioaddr = priv->base;
849
850 writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
851 851
852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
853 unsigned int entry = priv->tx_tail; 853 unsigned int entry = priv->tx_tail;
@@ -888,7 +888,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
888 } 888 }
889 889
890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
891 TX_THRESH) 891 MAX_SKB_FRAGS)
892 netif_wake_queue(priv->dev); 892 netif_wake_queue(priv->dev);
893} 893}
894 894
@@ -965,8 +965,7 @@ static int xgmac_hw_init(struct net_device *dev)
965 ctrl |= XGMAC_CONTROL_IPC; 965 ctrl |= XGMAC_CONTROL_IPC;
966 writel(ctrl, ioaddr + XGMAC_CONTROL); 966 writel(ctrl, ioaddr + XGMAC_CONTROL);
967 967
968 value = DMA_CONTROL_DFF; 968 writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
969 writel(value, ioaddr + XGMAC_DMA_CONTROL);
970 969
971 /* Set the HW DMA mode and the COE */ 970 /* Set the HW DMA mode and the COE */
972 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | 971 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1059,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1060 struct xgmac_priv *priv = netdev_priv(dev); 1059 struct xgmac_priv *priv = netdev_priv(dev);
1061 unsigned int entry; 1060 unsigned int entry;
1062 int i; 1061 int i;
1062 u32 irq_flag;
1063 int nfrags = skb_shinfo(skb)->nr_frags; 1063 int nfrags = skb_shinfo(skb)->nr_frags;
1064 struct xgmac_dma_desc *desc, *first; 1064 struct xgmac_dma_desc *desc, *first;
1065 unsigned int desc_flags; 1065 unsigned int desc_flags;
1066 unsigned int len; 1066 unsigned int len;
1067 dma_addr_t paddr; 1067 dma_addr_t paddr;
1068 1068
1069 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1069 priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1070 (nfrags + 1)) { 1070 irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
1071 writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
1072 priv->base + XGMAC_DMA_INTR_ENA);
1073 netif_stop_queue(dev);
1074 return NETDEV_TX_BUSY;
1075 }
1076 1071
1077 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 1072 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1078 TXDESC_CSUM_ALL : 0; 1073 TXDESC_CSUM_ALL : 0;
@@ -1113,9 +1108,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1113 /* Interrupt on completition only for the latest segment */ 1108 /* Interrupt on completition only for the latest segment */
1114 if (desc != first) 1109 if (desc != first)
1115 desc_set_tx_owner(desc, desc_flags | 1110 desc_set_tx_owner(desc, desc_flags |
1116 TXDESC_LAST_SEG | TXDESC_INTERRUPT); 1111 TXDESC_LAST_SEG | irq_flag);
1117 else 1112 else
1118 desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; 1113 desc_flags |= TXDESC_LAST_SEG | irq_flag;
1119 1114
1120 /* Set owner on first desc last to avoid race condition */ 1115 /* Set owner on first desc last to avoid race condition */
1121 wmb(); 1116 wmb();
@@ -1124,6 +1119,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1124 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1119 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1125 1120
1126 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1121 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1122 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
1123 MAX_SKB_FRAGS)
1124 netif_stop_queue(dev);
1127 1125
1128 return NETDEV_TX_OK; 1126 return NETDEV_TX_OK;
1129} 1127}
@@ -1139,9 +1137,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1139 struct sk_buff *skb; 1137 struct sk_buff *skb;
1140 int frame_len; 1138 int frame_len;
1141 1139
1142 writel(DMA_STATUS_RI | DMA_STATUS_NIS,
1143 priv->base + XGMAC_DMA_STATUS);
1144
1145 entry = priv->rx_tail; 1140 entry = priv->rx_tail;
1146 p = priv->dma_rx + entry; 1141 p = priv->dma_rx + entry;
1147 if (desc_get_owner(p)) 1142 if (desc_get_owner(p))
@@ -1180,8 +1175,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1180 1175
1181 xgmac_rx_refill(priv); 1176 xgmac_rx_refill(priv);
1182 1177
1183 writel(1, priv->base + XGMAC_DMA_RX_POLL);
1184
1185 return count; 1178 return count;
1186} 1179}
1187 1180
@@ -1205,7 +1198,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
1205 1198
1206 if (work_done < budget) { 1199 if (work_done < budget) {
1207 napi_complete(napi); 1200 napi_complete(napi);
1208 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); 1201 __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1209 } 1202 }
1210 return work_done; 1203 return work_done;
1211} 1204}
@@ -1350,7 +1343,7 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1350 struct xgmac_priv *priv = netdev_priv(dev); 1343 struct xgmac_priv *priv = netdev_priv(dev);
1351 void __iomem *ioaddr = priv->base; 1344 void __iomem *ioaddr = priv->base;
1352 1345
1353 intr_status = readl(ioaddr + XGMAC_INT_STAT); 1346 intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1354 if (intr_status & XGMAC_INT_STAT_PMT) { 1347 if (intr_status & XGMAC_INT_STAT_PMT) {
1355 netdev_dbg(priv->dev, "received Magic frame\n"); 1348 netdev_dbg(priv->dev, "received Magic frame\n");
1356 /* clear the PMT bits 5 and 6 by reading the PMT */ 1349 /* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1361,9 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1368 struct xgmac_extra_stats *x = &priv->xstats; 1361 struct xgmac_extra_stats *x = &priv->xstats;
1369 1362
1370 /* read the status register (CSR5) */ 1363 /* read the status register (CSR5) */
1371 intr_status = readl(priv->base + XGMAC_DMA_STATUS); 1364 intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1372 intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); 1365 intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1373 writel(intr_status, priv->base + XGMAC_DMA_STATUS); 1366 __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1374 1367
1375 /* It displays the DMA process states (CSR5 register) */ 1368 /* It displays the DMA process states (CSR5 register) */
1376 /* ABNORMAL interrupts */ 1369 /* ABNORMAL interrupts */
@@ -1405,8 +1398,8 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1405 } 1398 }
1406 1399
1407 /* TX/RX NORMAL interrupts */ 1400 /* TX/RX NORMAL interrupts */
1408 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { 1401 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
1409 writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); 1402 __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1410 napi_schedule(&priv->napi); 1403 napi_schedule(&priv->napi);
1411 } 1404 }
1412 1405