aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBeniamino Galvani <b.galvani@gmail.com>2014-09-10 16:50:02 -0400
committerDavid S. Miller <davem@davemloft.net>2014-09-12 17:17:49 -0400
commit7ce7679d6bbd1715799a9cf17b9b558bc2d962b7 (patch)
treeee0ef95db8dbe1bdc567ea1a7ee9cb0838e02601
parent1d7efe9dfaa6025acd29a726315f6f7d30a9f1ca (diff)
net: arc_emac: enable tx interrupts
In the current implementation the cleaning of tx ring is done by the NAPI poll handler, which is scheduled after rx interrupts. Thus, in absence of received packets the reclaim of used tx buffers is never executed, blocking further transmission. This can be easily reproduced starting the transmission of a UDP flow with iperf, which blocks almost immediately because skbs are not returned to the stack and the socket send buffer becomes full. The patch enables tx interrupts so that the tx reclaim is scheduled after completed transmissions. Signed-off-by: Beniamino Galvani <b.galvani@gmail.com> Reviewed-by: Florian Fainelli <f.fainelli@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/arc/emac_main.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..f7ab90d9cd7e 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -298,7 +298,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -327,9 +327,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -440,7 +440,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -511,7 +511,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);