Diffstat (limited to 'drivers/net/ethernet/arc/emac_main.c')
 drivers/net/ethernet/arc/emac_main.c | 53 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
 #define DRV_VERSION	"1.0"
 
 /**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
  *
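
The helper added above uses the standard one-slot-reserved ring convention:
txbd_curr is the producer index, txbd_dirty the consumer index, and one
descriptor is intentionally never filled so that equal indices always mean
"empty" rather than "full". A minimal userspace sketch of the same
arithmetic (the TX_BD_NUM value here is assumed purely for illustration):

    #include <assert.h>

    #define TX_BD_NUM 128  /* assumed ring size, for illustration only */

    /* Free slots between producer (curr) and consumer (dirty), minus
     * the one slot reserved to distinguish "full" from "empty".
     */
    static int tx_avail(unsigned int dirty, unsigned int curr)
    {
            return (dirty + TX_BD_NUM - curr - 1) % TX_BD_NUM;
    }

    int main(void)
    {
            assert(tx_avail(0, 0) == TX_BD_NUM - 1);  /* empty ring */
            assert(tx_avail(5, 4) == 0);              /* full: curr one behind dirty */
            assert(tx_avail(0, TX_BD_NUM - 1) == 0);  /* full across the wrap */
            return 0;
    }
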
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
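
This barrier pairs with the smp_mb() added in arc_emac_tx() (last hunk
below). The bug being fixed is a lost wakeup: tx() could stop the queue
based on a stale txbd_dirty while tx_clean() simultaneously skipped the
wakeup based on a stale txbd_curr or queue state, leaving the queue stopped
forever. Schematically, in the style of Documentation/memory-barriers.txt:

    CPU0 (arc_emac_tx)                   CPU1 (arc_emac_tx_clean)
    ---------------------------          ---------------------------------
    advance txbd_curr                    advance txbd_dirty
    smp_mb()                             smp_mb()
    if (!arc_emac_tx_avail(priv))        if (netif_queue_stopped(ndev) &&
            netif_stop_queue(ndev)           arc_emac_tx_avail(priv))
                                                 netif_wake_queue(ndev)

With full barriers on both sides, each CPU is guaranteed to see the other's
index update if it misses the other's check, so at least one of the two
wake conditions fires.
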
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
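
Together with the poll() change above, this extends the usual
masked-interrupt NAPI pattern to TX completions: the hard IRQ handler
clears the RX and TX enable bits and schedules the poller, and the bits are
set again only once poll() finishes under budget. The implied design choice
(not visible in these hunks, so treat it as a presumption) is that TX
descriptor reaping via arc_emac_tx_clean() now runs from NAPI context too,
so a TXINT only needs to schedule the poller rather than do work in hard
IRQ context. Condensed:

    /*
     * IRQ path:  status has RXINT|TXINT -> mask RXINT|TXINT
     *            -> __napi_schedule()
     * Poll path: reap RX (and, presumably, TX) work -> if under budget:
     *            napi_complete() -> unmask RXINT|TXINT
     */
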
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
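
Given the stop/wake protocol above, ndo_start_xmit() should never be
entered with a full ring, so this branch is a defensive guard rather than a
normal path, hence the "BUG!" log. Returning NETDEV_TX_BUSY asks the core
to requeue the skb and retry later; stopping the queue first keeps the
queue state consistent with the full ring. Annotated form of the guard
(comments added here; code as in the hunk):

    if (unlikely(!arc_emac_tx_avail(priv))) {
            /* Should be unreachable: the queue is stopped before the
             * ring fills. Reaching it means the stop/wake protocol was
             * violated somewhere.
             */
            netif_stop_queue(ndev);
            netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
            return NETDEV_TX_BUSY;  /* core requeues the skb */
    }
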
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
+		/* Refresh tx_dirty */
+		smp_mb();
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
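
The stop/re-check/restart sequence closes the classic race between the
availability test and netif_stop_queue(): tx_clean() on another CPU can
drain the whole ring in that window, and its wakeup is lost because the
queue is not stopped yet. Re-checking after the stop, with a barrier to
refresh txbd_dirty, recovers from exactly that interleaving. Annotated form
of the idiom (comments added here; code as in the hunk):

    if (!arc_emac_tx_avail(priv)) {
            netif_stop_queue(ndev);          /* publish "stopped" first */

            /* Refresh tx_dirty: pairs with the smp_mb() in
             * arc_emac_tx_clean(), so a cleanup that ran between the
             * check above and the stop is now visible.
             */
            smp_mb();

            if (arc_emac_tx_avail(priv))
                    netif_start_queue(ndev); /* cleaner already drained */
    }
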