diff options
author | Dai Haruki <dai.haruki@freescale.com> | 2008-04-09 20:37:51 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-04-16 20:06:50 -0400 |
commit | d080cd6301e107e79c6a0fc654319f8979f70549 (patch) | |
tree | 263b443368033c49c8b4345288677ac58cb68533 /drivers/net/gianfar.c | |
parent | 0b50d753874ad4843d305bf841ba5e28fc0f0ce7 (diff) |
gianfar: Support NAPI for TX Frames
Poll the completed TX frames in gfar_poll(). This prevents the TX
completion interrupt from interfering with the processing of received
frames.
We also disable hardware RX coalescing when NAPI is enabled.
Signed-off-by: Dai Haruki <dai.haruki@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r-- | drivers/net/gianfar.c | 56 |
1 file changed, 42 insertions(+), 14 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 601f93e482c6..c8c3df737d73 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev) | |||
1250 | } | 1250 | } |
1251 | 1251 | ||
1252 | /* Interrupt Handler for Transmit complete */ | 1252 | /* Interrupt Handler for Transmit complete */ |
1253 | static irqreturn_t gfar_transmit(int irq, void *dev_id) | 1253 | int gfar_clean_tx_ring(struct net_device *dev) |
1254 | { | 1254 | { |
1255 | struct net_device *dev = (struct net_device *) dev_id; | ||
1256 | struct gfar_private *priv = netdev_priv(dev); | ||
1257 | struct txbd8 *bdp; | 1255 | struct txbd8 *bdp; |
1256 | struct gfar_private *priv = netdev_priv(dev); | ||
1257 | int howmany = 0; | ||
1258 | 1258 | ||
1259 | /* Clear IEVENT */ | ||
1260 | gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); | ||
1261 | |||
1262 | /* Lock priv */ | ||
1263 | spin_lock(&priv->txlock); | ||
1264 | bdp = priv->dirty_tx; | 1259 | bdp = priv->dirty_tx; |
1265 | while ((bdp->status & TXBD_READY) == 0) { | 1260 | while ((bdp->status & TXBD_READY) == 0) { |
1266 | /* If dirty_tx and cur_tx are the same, then either the */ | 1261 | /* If dirty_tx and cur_tx are the same, then either the */ |
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1269 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) | 1264 | if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) |
1270 | break; | 1265 | break; |
1271 | 1266 | ||
1272 | dev->stats.tx_packets++; | 1267 | howmany++; |
1273 | 1268 | ||
1274 | /* Deferred means some collisions occurred during transmit, */ | 1269 | /* Deferred means some collisions occurred during transmit, */ |
1275 | /* but we eventually sent the packet. */ | 1270 | /* but we eventually sent the packet. */ |
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1278 | 1273 | ||
1279 | /* Free the sk buffer associated with this TxBD */ | 1274 | /* Free the sk buffer associated with this TxBD */ |
1280 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); | 1275 | dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); |
1276 | |||
1281 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; | 1277 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; |
1282 | priv->skb_dirtytx = | 1278 | priv->skb_dirtytx = |
1283 | (priv->skb_dirtytx + | 1279 | (priv->skb_dirtytx + |
1284 | 1) & TX_RING_MOD_MASK(priv->tx_ring_size); | 1280 | 1) & TX_RING_MOD_MASK(priv->tx_ring_size); |
1285 | 1281 | ||
1282 | /* Clean BD length for empty detection */ | ||
1283 | bdp->length = 0; | ||
1284 | |||
1286 | /* update bdp to point at next bd in the ring (wrapping if necessary) */ | 1285 | /* update bdp to point at next bd in the ring (wrapping if necessary) */ |
1287 | if (bdp->status & TXBD_WRAP) | 1286 | if (bdp->status & TXBD_WRAP) |
1288 | bdp = priv->tx_bd_base; | 1287 | bdp = priv->tx_bd_base; |
@@ -1297,6 +1296,25 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) | |||
1297 | netif_wake_queue(dev); | 1296 | netif_wake_queue(dev); |
1298 | } /* while ((bdp->status & TXBD_READY) == 0) */ | 1297 | } /* while ((bdp->status & TXBD_READY) == 0) */ |
1299 | 1298 | ||
1299 | dev->stats.tx_packets += howmany; | ||
1300 | |||
1301 | return howmany; | ||
1302 | } | ||
1303 | |||
1304 | /* Interrupt Handler for Transmit complete */ | ||
1305 | static irqreturn_t gfar_transmit(int irq, void *dev_id) | ||
1306 | { | ||
1307 | struct net_device *dev = (struct net_device *) dev_id; | ||
1308 | struct gfar_private *priv = netdev_priv(dev); | ||
1309 | |||
1310 | /* Clear IEVENT */ | ||
1311 | gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); | ||
1312 | |||
1313 | /* Lock priv */ | ||
1314 | spin_lock(&priv->txlock); | ||
1315 | |||
1316 | gfar_clean_tx_ring(dev); | ||
1317 | |||
1300 | /* If we are coalescing the interrupts, reset the timer */ | 1318 | /* If we are coalescing the interrupts, reset the timer */ |
1301 | /* Otherwise, clear it */ | 1319 | /* Otherwise, clear it */ |
1302 | if (likely(priv->txcoalescing)) { | 1320 | if (likely(priv->txcoalescing)) { |
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id) | |||
1392 | unsigned long flags; | 1410 | unsigned long flags; |
1393 | #endif | 1411 | #endif |
1394 | 1412 | ||
1395 | /* Clear IEVENT, so rx interrupt isn't called again | ||
1396 | * because of this interrupt */ | ||
1397 | gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); | ||
1398 | |||
1399 | /* support NAPI */ | 1413 | /* support NAPI */ |
1400 | #ifdef CONFIG_GFAR_NAPI | 1414 | #ifdef CONFIG_GFAR_NAPI |
1415 | /* Clear IEVENT, so interrupts aren't called again | ||
1416 | * because of the packets that have already arrived */ | ||
1417 | gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); | ||
1418 | |||
1401 | if (netif_rx_schedule_prep(dev, &priv->napi)) { | 1419 | if (netif_rx_schedule_prep(dev, &priv->napi)) { |
1402 | tempval = gfar_read(&priv->regs->imask); | 1420 | tempval = gfar_read(&priv->regs->imask); |
1403 | tempval &= IMASK_RX_DISABLED; | 1421 | tempval &= IMASK_RTX_DISABLED; |
1404 | gfar_write(&priv->regs->imask, tempval); | 1422 | gfar_write(&priv->regs->imask, tempval); |
1405 | 1423 | ||
1406 | __netif_rx_schedule(dev, &priv->napi); | 1424 | __netif_rx_schedule(dev, &priv->napi); |
@@ -1411,6 +1429,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id) | |||
1411 | gfar_read(&priv->regs->imask)); | 1429 | gfar_read(&priv->regs->imask)); |
1412 | } | 1430 | } |
1413 | #else | 1431 | #else |
1432 | /* Clear IEVENT, so rx interrupt isn't called again | ||
1433 | * because of this interrupt */ | ||
1434 | gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); | ||
1414 | 1435 | ||
1415 | spin_lock_irqsave(&priv->rxlock, flags); | 1436 | spin_lock_irqsave(&priv->rxlock, flags); |
1416 | gfar_clean_rx_ring(dev, priv->rx_ring_size); | 1437 | gfar_clean_rx_ring(dev, priv->rx_ring_size); |
@@ -1580,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget) | |||
1580 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); | 1601 | struct gfar_private *priv = container_of(napi, struct gfar_private, napi); |
1581 | struct net_device *dev = priv->dev; | 1602 | struct net_device *dev = priv->dev; |
1582 | int howmany; | 1603 | int howmany; |
1604 | unsigned long flags; | ||
1605 | |||
1606 | /* If we fail to get the lock, don't bother with the TX BDs */ | ||
1607 | if (spin_trylock_irqsave(&priv->txlock, flags)) { | ||
1608 | gfar_clean_tx_ring(dev); | ||
1609 | spin_unlock_irqrestore(&priv->txlock, flags); | ||
1610 | } | ||
1583 | 1611 | ||
1584 | howmany = gfar_clean_rx_ring(dev, budget); | 1612 | howmany = gfar_clean_rx_ring(dev, budget); |
1585 | 1613 | ||