Diffstat (limited to 'drivers/net/ethernet/freescale/fec.c')

 -rw-r--r--  drivers/net/ethernet/freescale/fec.c | 118
 1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141d..911d0253dbb2 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short status;
-	unsigned long flags;
+	unsigned int index;
 
 	if (!fep->link) {
 		/* Link is down or autonegotiation is in progress. */
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&fep->hw_lock, flags);
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	 * This should not happen, since ndev->tbusy should be set.
 	 */
 		printk("%s: tx queue full!.\n", ndev->name);
-		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	 * 4-byte boundaries. Use bounce buffers to copy data
 	 * and get it aligned. Ugh.
 	 */
+	if (fep->bufdesc_ex)
+		index = (struct bufdesc_ex *)bdp -
+			(struct bufdesc_ex *)fep->tx_bd_base;
+	else
+		index = bdp - fep->tx_bd_base;
+
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		unsigned int index;
-		if (fep->bufdesc_ex)
-			index = (struct bufdesc_ex *)bdp -
-				(struct bufdesc_ex *)fep->tx_bd_base;
-		else
-			index = bdp - fep->tx_bd_base;
 		memcpy(fep->tx_bounce[index], skb->data, skb->len);
 		bufaddr = fep->tx_bounce[index];
 	}
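The hoisted index computation above is the heart of the bookkeeping change: the array index into tx_skbuff[] and tx_bounce[] is now derived from the descriptor pointer itself instead of the separate skb_cur counter. A minimal sketch of that arithmetic as a standalone helper (hypothetical, not part of the patch; it assumes only the fec_enet_private fields visible in this diff):

	/* Hypothetical helper, not in the driver: recover the ring index
	 * of a descriptor by pointer arithmetic. Extended descriptors
	 * (struct bufdesc_ex) are larger than plain ones, so the element
	 * type used for the subtraction must match how the ring was
	 * allocated.
	 */
	static int fec_bd_index(struct fec_enet_private *fep, struct bufdesc *bdp)
	{
		if (fep->bufdesc_ex)
			return (struct bufdesc_ex *)bdp -
			       (struct bufdesc_ex *)fep->tx_bd_base;
		return bdp - fep->tx_bd_base;
	}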
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		swap_buffer(bufaddr, skb->len);
 
 	/* Save skb pointer */
-	fep->tx_skbuff[fep->skb_cur] = skb;
-
-	ndev->stats.tx_bytes += skb->len;
-	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+	fep->tx_skbuff[index] = skb;
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 	}
-	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-
 	/* If this was the last BD in the ring, start at the beginning again. */
 	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
 	else
 		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-	if (bdp == fep->dirty_tx) {
-		fep->tx_full = 1;
+	fep->cur_tx = bdp;
+
+	if (fep->cur_tx == fep->dirty_tx)
 		netif_stop_queue(ndev);
-	}
 
-	fep->cur_tx = bdp;
+	/* Trigger transmission start */
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
 	skb_tx_timestamp(skb);
 
-	spin_unlock_irqrestore(&fep->hw_lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
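With the tx_full flag gone, ring occupancy is judged purely from the two descriptor pointers, and the queue is stopped before DMA is kicked so a completion cannot race the check. A sketch of the invariant this hunk relies on (hypothetical helper, not driver code):

	/* Hypothetical helper: once the producer (cur_tx) has been
	 * advanced past the just-queued descriptor, the ring is full
	 * exactly when it has caught up with the consumer (dirty_tx).
	 */
	static bool fec_tx_ring_full(const struct fec_enet_private *fep)
	{
		return fep->cur_tx == fep->dirty_tx;
	}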
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
 			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
 	fep->cur_rx = fep->rx_bd_base;
 
-	/* Reset SKB transmit buffers. */
-	fep->skb_cur = fep->skb_dirty = 0;
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
 		if (fep->tx_skbuff[i]) {
 			dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
+	int index = 0;
 
 	fep = netdev_priv(ndev);
-	spin_lock(&fep->hw_lock);
 	bdp = fep->dirty_tx;
 
+	/* get next bdp of dirty_tx */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = fep->tx_bd_base;
+	else
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-		if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+		/* current queue is empty */
+		if (bdp == fep->cur_tx)
 			break;
 
+		if (fep->bufdesc_ex)
+			index = (struct bufdesc_ex *)bdp -
+				(struct bufdesc_ex *)fep->tx_bd_base;
+		else
+			index = bdp - fep->tx_bd_base;
+
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
 
-		skb = fep->tx_skbuff[fep->skb_dirty];
+		skb = fep->tx_skbuff[index];
+
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-		fep->tx_skbuff[fep->skb_dirty] = NULL;
-		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+		fep->tx_skbuff[index] = NULL;
+
+		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
 		if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (fep->tx_full) {
-			fep->tx_full = 0;
+		if (fep->dirty_tx != fep->cur_tx) {
 			if (netif_queue_stopped(ndev))
 				netif_wake_queue(ndev);
 		}
 	}
-	fep->dirty_tx = bdp;
-	spin_unlock(&fep->hw_lock);
+	return;
 }
 
 
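The reclaim loop now starts one descriptor past dirty_tx and stops when it reaches cur_tx, so dirty_tx always names the last completed slot. A condensed sketch of that walk (hypothetical, not the driver source; next_bd() stands in for the wrap-aware advance done with fec_enet_get_nextdesc() above):

	/* Condensed sketch of the reclaim order introduced by this patch. */
	static void fec_tx_reclaim_sketch(struct net_device *ndev)
	{
		struct fec_enet_private *fep = netdev_priv(ndev);
		struct bufdesc *bdp = next_bd(fep, fep->dirty_tx);	/* hypothetical */

		while (!(bdp->cbd_sc & BD_ENET_TX_READY)) {
			if (bdp == fep->cur_tx)
				break;			/* queue drained */

			/* ...dma_unmap_single() and dev_kfree_skb_any() here... */

			fep->dirty_tx = bdp;		/* publish the completed slot */
			bdp = next_bd(fep, bdp);

			if (fep->dirty_tx != fep->cur_tx && netif_queue_stopped(ndev))
				netif_wake_queue(ndev);	/* room freed up */
		}
	}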
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
 		int_events = readl(fep->hwp + FEC_IEVENT);
 		writel(int_events, fep->hwp + FEC_IEVENT);
 
-		if (int_events & FEC_ENET_RXF) {
+		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
 			ret = IRQ_HANDLED;
 
 			/* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
 			}
 		}
 
-		/* Transmit OK, or non-fatal error. Update the buffer
-		 * descriptors. FEC handles all errors, we just discover
-		 * them as part of the transmit process.
-		 */
-		if (int_events & FEC_ENET_TXF) {
-			ret = IRQ_HANDLED;
-			fec_enet_tx(ndev);
-		}
-
 		if (int_events & FEC_ENET_MII) {
 			ret = IRQ_HANDLED;
 			complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 	int pkts = fec_enet_rx(ndev, budget);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	fec_enet_tx(ndev);
+
 	if (pkts < budget) {
 		napi_complete(napi);
 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
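Moving fec_enet_tx() out of the hard interrupt and into the NAPI poll is what allows hw_lock to be dropped from both the xmit and reclaim paths: TX completion is now serialized with RX processing in softirq context. A condensed sketch of the resulting poll flow (assumed shape, using only symbols that appear in this diff):

	/* Condensed sketch, not the driver source: one NAPI scheduling
	 * point now covers both RX and TX events, and the poll handler
	 * reclaims TX descriptors before deciding whether to re-enable
	 * interrupts.
	 */
	static int fec_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct net_device *ndev = napi->dev;
		struct fec_enet_private *fep = netdev_priv(ndev);
		int pkts = fec_enet_rx(ndev, budget);	/* RX first, bounded by budget */

		fec_enet_tx(ndev);			/* then TX reclaim */

		if (pkts < budget) {
			napi_complete(napi);
			/* restore the mask the ISR narrowed before scheduling */
			writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
		}
		return pkts;
	}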
@@ -939,24 +934,28 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 		goto spin_unlock;
 	}
 
-	/* Duplex link change */
 	if (phy_dev->link) {
-		if (fep->full_duplex != phy_dev->duplex) {
-			fec_restart(ndev, phy_dev->duplex);
-			/* prevent unnecessary second fec_restart() below */
+		if (!fep->link) {
 			fep->link = phy_dev->link;
 			status_change = 1;
 		}
-	}
 
-	/* Link on or off change */
-	if (phy_dev->link != fep->link) {
-		fep->link = phy_dev->link;
-		if (phy_dev->link)
+		if (fep->full_duplex != phy_dev->duplex)
+			status_change = 1;
+
+		if (phy_dev->speed != fep->speed) {
+			fep->speed = phy_dev->speed;
+			status_change = 1;
+		}
+
+		/* if any of the above changed restart the FEC */
+		if (status_change)
 			fec_restart(ndev, phy_dev->duplex);
-		else
+	} else {
+		if (fep->link) {
 			fec_stop(ndev);
 			status_change = 1;
+		}
 	}
 
 spin_unlock:
@@ -1333,7 +1332,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 
@@ -1357,7 +1356,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	int i;
+	unsigned int i;
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 
@@ -1442,6 +1441,7 @@ fec_enet_close(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	/* Don't know what to do yet. */
+	napi_disable(&fep->napi);
 	fep->opened = 0;
 	netif_stop_queue(ndev);
 	fec_stop(ndev);
@@ -1598,7 +1598,7 @@ static int fec_enet_init(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
 	struct bufdesc *bdp;
-	int i;
+	unsigned int i;
 
 	/* Allocate memory for buffer descriptors. */
 	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1646,6 +1646,7 @@ static int fec_enet_init(struct net_device *ndev)
 
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
+	fep->cur_tx = bdp;
 	for (i = 0; i < TX_RING_SIZE; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1658,7 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Set the last buffer to wrap */
 	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
 	bdp->cbd_sc |= BD_SC_WRAP;
+	fep->dirty_tx = bdp;
 
 	fec_restart(ndev, 0);
 