aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorClaudiu Manoil <claudiu.manoil@freescale.com>2015-05-06 11:07:29 -0400
committerDavid S. Miller <davem@davemloft.net>2015-05-09 17:37:46 -0400
commitbc602280871cdedc906f622b036f5799f16c13c2 (patch)
tree4dd58bdadf78fc6036a4cd385f6582f3b750ea53
parent39d726b76c3981598b73790c4908bc290e2dc326 (diff)
gianfar: Move TxFIFO underrun handling to reset path
Handle TxFIFO underrun exceptions outside the fast path. A controller reset is more reliable in this exceptional case, as opposed to re-enabling on-the-fly the Tx DMA. As the controller reset is handled outside the fast path by the reset_gfar() workqueue handler, the locking scheme on the Tx path is significantly simplified. Because the Tx processing (xmit queues and tx napi) is disabled during controller reset, tstat access from xmit does not require locking. So the scope of the txlock on the processing path is now reduced to num_txbdfree, which is shared only between process context (xmit) and softirq (clean_tx_ring). As a result, the txlock must not guard against interrupt context, and the spin_lock_irqsave() from xmit can be replaced by spin_lock_bh(). Likewise, the locking has been downgraded for clean_tx_ring(). Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c40
1 file changed, 10 insertions, 30 deletions
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ee080d49bc0..3c84e5acd42d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2254,7 +2254,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2254 int i, rq = 0; 2254 int i, rq = 0;
2255 int do_tstamp, do_csum, do_vlan; 2255 int do_tstamp, do_csum, do_vlan;
2256 u32 bufaddr; 2256 u32 bufaddr;
2257 unsigned long flags;
2258 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; 2257 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2259 2258
2260 rq = skb->queue_mapping; 2259 rq = skb->queue_mapping;
@@ -2434,19 +2433,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2434 2433
2435 netdev_tx_sent_queue(txq, bytes_sent); 2434 netdev_tx_sent_queue(txq, bytes_sent);
2436 2435
2437 /* We can work in parallel with gfar_clean_tx_ring(), except
2438 * when modifying num_txbdfree. Note that we didn't grab the lock
2439 * when we were reading the num_txbdfree and checking for available
2440 * space, that's because outside of this function it can only grow,
2441 * and once we've got needed space, it cannot suddenly disappear.
2442 *
2443 * The lock also protects us from gfar_error(), which can modify
2444 * regs->tstat and thus retrigger the transfers, which is why we
2445 * also must grab the lock before setting ready bit for the first
2446 * to be transmitted BD.
2447 */
2448 spin_lock_irqsave(&tx_queue->txlock, flags);
2449
2450 gfar_wmb(); 2436 gfar_wmb();
2451 2437
2452 txbdp_start->lstatus = cpu_to_be32(lstatus); 2438 txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2449,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2463 2449
2464 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2450 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2465 2451
2452 /* We can work in parallel with gfar_clean_tx_ring(), except
2453 * when modifying num_txbdfree. Note that we didn't grab the lock
2454 * when we were reading the num_txbdfree and checking for available
2455 * space, that's because outside of this function it can only grow.
2456 */
2457 spin_lock_bh(&tx_queue->txlock);
2466 /* reduce TxBD free count */ 2458 /* reduce TxBD free count */
2467 tx_queue->num_txbdfree -= (nr_txbds); 2459 tx_queue->num_txbdfree -= (nr_txbds);
2460 spin_unlock_bh(&tx_queue->txlock);
2468 2461
2469 /* If the next BD still needs to be cleaned up, then the bds 2462 /* If the next BD still needs to be cleaned up, then the bds
2470 * are full. We need to tell the kernel to stop sending us stuff. 2463 * are full. We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2471,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2478 /* Tell the DMA to go go go */ 2471 /* Tell the DMA to go go go */
2479 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); 2472 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2480 2473
2481 /* Unlock priv */
2482 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2483
2484 return NETDEV_TX_OK; 2474 return NETDEV_TX_OK;
2485 2475
2486dma_map_err: 2476dma_map_err:
@@ -2622,7 +2612,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2622 skb_dirtytx = tx_queue->skb_dirtytx; 2612 skb_dirtytx = tx_queue->skb_dirtytx;
2623 2613
2624 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2614 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2625 unsigned long flags;
2626 2615
2627 frags = skb_shinfo(skb)->nr_frags; 2616 frags = skb_shinfo(skb)->nr_frags;
2628 2617
@@ -2686,9 +2675,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2686 TX_RING_MOD_MASK(tx_ring_size); 2675 TX_RING_MOD_MASK(tx_ring_size);
2687 2676
2688 howmany++; 2677 howmany++;
2689 spin_lock_irqsave(&tx_queue->txlock, flags); 2678 spin_lock(&tx_queue->txlock);
2690 tx_queue->num_txbdfree += nr_txbds; 2679 tx_queue->num_txbdfree += nr_txbds;
2691 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2680 spin_unlock(&tx_queue->txlock);
2692 } 2681 }
2693 2682
2694 /* If we freed a buffer, we can restart transmission, if necessary */ 2683 /* If we freed a buffer, we can restart transmission, if necessary */
@@ -3411,21 +3400,12 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3411 if (events & IEVENT_CRL) 3400 if (events & IEVENT_CRL)
3412 dev->stats.tx_aborted_errors++; 3401 dev->stats.tx_aborted_errors++;
3413 if (events & IEVENT_XFUN) { 3402 if (events & IEVENT_XFUN) {
3414 unsigned long flags;
3415
3416 netif_dbg(priv, tx_err, dev, 3403 netif_dbg(priv, tx_err, dev,
3417 "TX FIFO underrun, packet dropped\n"); 3404 "TX FIFO underrun, packet dropped\n");
3418 dev->stats.tx_dropped++; 3405 dev->stats.tx_dropped++;
3419 atomic64_inc(&priv->extra_stats.tx_underrun); 3406 atomic64_inc(&priv->extra_stats.tx_underrun);
3420 3407
3421 local_irq_save(flags); 3408 schedule_work(&priv->reset_task);
3422 lock_tx_qs(priv);
3423
3424 /* Reactivate the Tx Queues */
3425 gfar_write(&regs->tstat, gfargrp->tstat);
3426
3427 unlock_tx_qs(priv);
3428 local_irq_restore(flags);
3429 } 3409 }
3430 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 3410 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3431 } 3411 }