 drivers/net/ethernet/freescale/fec.c | 85
 drivers/net/ethernet/freescale/fec.h |  3
 2 files changed, 41 insertions, 47 deletions
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141d..069a155d16ed 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         struct bufdesc *bdp;
         void *bufaddr;
         unsigned short status;
-        unsigned long flags;
+        unsigned int index;
 
         if (!fep->link) {
                 /* Link is down or autonegotiation is in progress. */
                 return NETDEV_TX_BUSY;
         }
 
-        spin_lock_irqsave(&fep->hw_lock, flags);
         /* Fill in a Tx ring entry */
         bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                  * This should not happen, since ndev->tbusy should be set.
                  */
                 printk("%s: tx queue full!.\n", ndev->name);
-                spin_unlock_irqrestore(&fep->hw_lock, flags);
                 return NETDEV_TX_BUSY;
         }
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
          * 4-byte boundaries. Use bounce buffers to copy data
          * and get it aligned. Ugh.
          */
+        if (fep->bufdesc_ex)
+                index = (struct bufdesc_ex *)bdp -
+                        (struct bufdesc_ex *)fep->tx_bd_base;
+        else
+                index = bdp - fep->tx_bd_base;
+
         if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-                unsigned int index;
-                if (fep->bufdesc_ex)
-                        index = (struct bufdesc_ex *)bdp -
-                                (struct bufdesc_ex *)fep->tx_bd_base;
-                else
-                        index = bdp - fep->tx_bd_base;
                 memcpy(fep->tx_bounce[index], skb->data, skb->len);
                 bufaddr = fep->tx_bounce[index];
         }
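
The hoisted index calculation is plain pointer difference, but it only works because the subtraction is done with the element type that is actually laid out in the ring's memory; subtracting with the smaller base type silently scales the result. A self-contained model of the arithmetic (the struct layouts and field names below are illustrative stand-ins, not the driver's definitions):

#include <stdio.h>

/* Illustrative stand-ins for the driver's two descriptor layouts;
 * sizes and field names are assumptions for this sketch only. */
struct bufdesc {
        unsigned short cbd_sc;
        unsigned short cbd_datlen;
        unsigned int cbd_bufaddr;
};

struct bufdesc_ex {
        struct bufdesc desc;
        unsigned int cbd_esc;
        unsigned int cbd_prot;
        unsigned int cbd_bdu;
        unsigned int ts;
};

int main(void)
{
        struct bufdesc_ex ring[8];
        struct bufdesc *base = (struct bufdesc *)ring;
        struct bufdesc *bdp = (struct bufdesc *)&ring[5];
        int bufdesc_ex = 1;     /* extended-descriptor mode */
        int index;

        /* Same pattern as the hunk above: cast both pointers to the
         * element type actually in memory, then subtract. */
        if (bufdesc_ex)
                index = (struct bufdesc_ex *)bdp -
                        (struct bufdesc_ex *)base;
        else
                index = bdp - base;

        printf("index = %d\n", index);          /* prints 5 */
        printf("wrong = %d\n", (int)(bdp - base));      /* 15 here: off by the size ratio */
        return 0;
}

Computing index unconditionally, before the alignment test, is what lets the save of the skb pointer in the next hunk use the same slot number as the bounce buffer.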
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 swap_buffer(bufaddr, skb->len);
 
         /* Save skb pointer */
-        fep->tx_skbuff[fep->skb_cur] = skb;
-
-        ndev->stats.tx_bytes += skb->len;
-        fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+        fep->tx_skbuff[index] = skb;
 
         /* Push the data cache so the CPM does not get stale memory
          * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         ebdp->cbd_esc = BD_ENET_TX_INT;
                 }
         }
-        /* Trigger transmission start */
-        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
         /* If this was the last BD in the ring, start at the beginning again. */
         if (status & BD_ENET_TX_WRAP)
                 bdp = fep->tx_bd_base;
         else
                 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-        if (bdp == fep->dirty_tx) {
-                fep->tx_full = 1;
+        fep->cur_tx = bdp;
+
+        if (fep->cur_tx == fep->dirty_tx)
                 netif_stop_queue(ndev);
-        }
 
-        fep->cur_tx = bdp;
+        /* Trigger transmission start */
+        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
         skb_tx_timestamp(skb);
 
-        spin_unlock_irqrestore(&fep->hw_lock, flags);
-
         return NETDEV_TX_OK;
 }
 
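With tx_full gone, the rewritten tail of fec_enet_start_xmit() derives ring-full purely from the two ring pointers: once cur_tx has been advanced, cur_tx == dirty_tx can only mean the producer has wrapped all the way around onto the last unreaped descriptor, so the queue is stopped. A minimal userspace model of that producer-side rule (the ring size, helper name, and queue_stopped flag are invented for the sketch, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8     /* toy size; the driver's TX ring is larger */

/* cur_tx: next slot the producer will fill.
 * dirty_tx: last slot the consumer has cleaned.
 * Initialized the way fec_enet_init() now does it: cur_tx at the
 * first descriptor, dirty_tx at the last (wrap) descriptor. */
static int cur_tx = 0;
static int dirty_tx = RING_SIZE - 1;
static bool queue_stopped;      /* stands in for netif_stop_queue() state */

static bool tx_one(void)
{
        if (queue_stopped)
                return false;                   /* NETDEV_TX_BUSY */

        /* ...fill descriptor cur_tx and hand it to the hardware... */
        cur_tx = (cur_tx + 1) % RING_SIZE;

        /* The test from the hunk above: the producer has wrapped onto
         * the last unreaped slot, so every descriptor is in flight. */
        if (cur_tx == dirty_tx)
                queue_stopped = true;
        return true;
}

int main(void)
{
        int sent = 0;

        while (tx_one())
                sent++;
        printf("stopped after %d packets\n", sent);     /* RING_SIZE - 1 */
        return 0;
}

The scheme deliberately sacrifices one slot: equality reached by the producer after an advance means full, while the cleanup path below only reads equality as empty when it approaches cur_tx from the other side, so the two states never look alike.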
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
         writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                         * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
         fep->cur_rx = fep->rx_bd_base;
 
-        /* Reset SKB transmit buffers. */
-        fep->skb_cur = fep->skb_dirty = 0;
         for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                 if (fep->tx_skbuff[i]) {
                         dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
         struct bufdesc *bdp;
         unsigned short status;
         struct sk_buff *skb;
+        int index = 0;
 
         fep = netdev_priv(ndev);
-        spin_lock(&fep->hw_lock);
         bdp = fep->dirty_tx;
 
+        /* get next bdp of dirty_tx */
+        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+                bdp = fep->tx_bd_base;
+        else
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
         while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-                if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+                /* current queue is empty */
+                if (bdp == fep->cur_tx)
                         break;
 
+                if (fep->bufdesc_ex)
+                        index = (struct bufdesc_ex *)bdp -
+                                (struct bufdesc_ex *)fep->tx_bd_base;
+                else
+                        index = bdp - fep->tx_bd_base;
+
                 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                 bdp->cbd_bufaddr = 0;
 
-                skb = fep->tx_skbuff[fep->skb_dirty];
+                skb = fep->tx_skbuff[index];
+
                 /* Check for errors. */
                 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                    BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
                 /* Free the sk buffer associated with this last transmit */
                 dev_kfree_skb_any(skb);
-                fep->tx_skbuff[fep->skb_dirty] = NULL;
-                fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+                fep->tx_skbuff[index] = NULL;
+
+                fep->dirty_tx = bdp;
 
                 /* Update pointer to next buffer descriptor to be transmitted */
                 if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
                 /* Since we have freed up a buffer, the ring is no longer full
                  */
-                if (fep->tx_full) {
-                        fep->tx_full = 0;
+                if (fep->dirty_tx != fep->cur_tx) {
                         if (netif_queue_stopped(ndev))
                                 netif_wake_queue(ndev);
                 }
         }
-        fep->dirty_tx = bdp;
-        spin_unlock(&fep->hw_lock);
+        return;
 }
 
 
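The consumer side mirrors the producer: dirty_tx now points at the last descriptor already cleaned, so reaping starts one past it and stops on meeting cur_tx, and the queue is woken as soon as the two pointers differ again. A sketch of that loop, continuing the toy ring model after the xmit hunk above (reap_all and its bookkeeping are invented names, not the driver's):

/* Continues the toy model: reap every completed slot, starting one
 * past dirty_tx and stopping at cur_tx. */
static void reap_all(void)
{
        /* "get next bdp of dirty_tx": dirty_tx itself was already freed. */
        int bdp = (dirty_tx + 1) % RING_SIZE;

        while (bdp != cur_tx) {         /* bdp == cur_tx: queue is empty */
                /* ...unmap DMA, free the skb for slot bdp... */
                dirty_tx = bdp;

                /* A slot was freed, so the ring is no longer full. */
                if (dirty_tx != cur_tx && queue_stopped)
                        queue_stopped = false;  /* netif_wake_queue() */

                bdp = (bdp + 1) % RING_SIZE;
        }
}

Run against the producer above, this frees all RING_SIZE - 1 in-flight slots and re-opens the queue on the first one. The split is what makes the hw_lock removal safe: the xmit path is the only writer of cur_tx and this cleanup path the only writer of dirty_tx, so neither needs a lock against the other.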
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
                 int_events = readl(fep->hwp + FEC_IEVENT);
                 writel(int_events, fep->hwp + FEC_IEVENT);
 
-                if (int_events & FEC_ENET_RXF) {
+                if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
                         ret = IRQ_HANDLED;
 
                         /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
                         }
                 }
 
-                /* Transmit OK, or non-fatal error. Update the buffer
-                 * descriptors. FEC handles all errors, we just discover
-                 * them as part of the transmit process.
-                 */
-                if (int_events & FEC_ENET_TXF) {
-                        ret = IRQ_HANDLED;
-                        fec_enet_tx(ndev);
-                }
-
                 if (int_events & FEC_ENET_MII) {
                         ret = IRQ_HANDLED;
                         complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
         int pkts = fec_enet_rx(ndev, budget);
         struct fec_enet_private *fep = netdev_priv(ndev);
 
+        fec_enet_tx(ndev);
+
         if (pkts < budget) {
                 napi_complete(napi);
                 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
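Moving fec_enet_tx() into the NAPI poll is what lets the locking go away: TX completion and RX processing now share one softirq context, and the hard IRQ's only job for both event types is to mask interrupts and schedule the poller (the RXF branch shown two hunks up now fires for TXF too). Read as a whole, the patched poll function comes out roughly as below; only the lines visible in the hunk are verbatim, the ndev declaration and the final return are inferred from context:

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;    /* inferred */
        int pkts = fec_enet_rx(ndev, budget);   /* RX, bounded by budget */
        struct fec_enet_private *fep = netdev_priv(ndev);

        fec_enet_tx(ndev);                      /* TX completion, lock-free */

        if (pkts < budget) {
                napi_complete(napi);            /* leave polling mode */
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);        /* unmask */
        }
        return pkts;                            /* inferred */
}

Only RX counts against the budget; TX reaping is cheap and runs to completion each round before the decision to re-enable interrupts.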
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
         /* ...and the same for transmit */
         bdp = fep->tx_bd_base;
+        fep->cur_tx = bdp;
         for (i = 0; i < TX_RING_SIZE; i++) {
 
                 /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
         /* Set the last buffer to wrap */
         bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
+        fep->dirty_tx = bdp;
 
         fec_restart(ndev, 0);
 
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c4..c0f63be91ff7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -214,8 +214,6 @@ struct fec_enet_private {
         unsigned char *tx_bounce[TX_RING_SIZE];
         struct sk_buff *tx_skbuff[TX_RING_SIZE];
         struct sk_buff *rx_skbuff[RX_RING_SIZE];
-        ushort skb_cur;
-        ushort skb_dirty;
 
         /* CPM dual port RAM relative addresses */
         dma_addr_t bd_dma;
@@ -227,7 +225,6 @@ struct fec_enet_private {
         /* The ring entries to be free()ed */
         struct bufdesc *dirty_tx;
 
-        uint tx_full;
         /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
         spinlock_t hw_lock;
 