aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicolas Ferre <nicolas.ferre@atmel.com>2012-10-31 02:04:57 -0400
committerDavid S. Miller <davem@davemloft.net>2012-11-01 11:45:58 -0400
commite86cd53afc5907f7c221b709916e2dd354e14691 (patch)
tree9c07152729ebed99f79b910102e39113c0323ea5
parentd1d1b53d9d28c8e44a72fadae491702b36e2e1fb (diff)
net/macb: better manage tx errors
Handle all TX errors, not only underruns. TX error management is deferred to a dedicated workqueue. Reinitialize the TX ring after processing all remaining frames, and restart the controller when everything has been cleaned up properly. NAPI is not stopped during this task, as the driver only handles NAPI for RX for now. With this sequence, we do not need a special check during the xmit method, as the packets will be caught by TX disable during workqueue execution. Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com> Tested-by: Joachim Eastwood <manabian@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/cadence/macb.c166
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
2 files changed, 113 insertions, 54 deletions
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 78488b4c6fb2..d5b52ff6d586 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -44,6 +44,16 @@
44 44
45#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 45#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
46 | MACB_BIT(ISR_ROVR)) 46 | MACB_BIT(ISR_ROVR))
47#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
48 | MACB_BIT(ISR_RLE) \
49 | MACB_BIT(TXERR))
50#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
51
52/*
53 * Graceful stop timeouts in us. We should allow up to
54 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
55 */
56#define MACB_HALT_TIMEOUT 1230
47 57
48/* Ring buffer accessors */ 58/* Ring buffer accessors */
49static unsigned int macb_tx_ring_wrap(unsigned int index) 59static unsigned int macb_tx_ring_wrap(unsigned int index)
@@ -339,66 +349,113 @@ static void macb_update_stats(struct macb *bp)
339 *p += __raw_readl(reg); 349 *p += __raw_readl(reg);
340} 350}
341 351
342static void macb_tx(struct macb *bp) 352static int macb_halt_tx(struct macb *bp)
343{ 353{
344 unsigned int tail; 354 unsigned long halt_time, timeout;
345 unsigned int head; 355 u32 status;
346 u32 status;
347 356
348 status = macb_readl(bp, TSR); 357 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
349 macb_writel(bp, TSR, status);
350 358
351 netdev_vdbg(bp->dev, "macb_tx status = 0x%03lx\n", (unsigned long)status); 359 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
360 do {
361 halt_time = jiffies;
362 status = macb_readl(bp, TSR);
363 if (!(status & MACB_BIT(TGO)))
364 return 0;
352 365
353 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { 366 usleep_range(10, 250);
354 int i; 367 } while (time_before(halt_time, timeout));
355 netdev_err(bp->dev, "TX %s, resetting buffers\n",
356 status & MACB_BIT(UND) ?
357 "underrun" : "retry limit exceeded");
358 368
359 /* Transfer ongoing, disable transmitter, to avoid confusion */ 369 return -ETIMEDOUT;
360 if (status & MACB_BIT(TGO)) 370}
361 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
362 371
363 head = bp->tx_head; 372static void macb_tx_error_task(struct work_struct *work)
373{
374 struct macb *bp = container_of(work, struct macb, tx_error_task);
375 struct macb_tx_skb *tx_skb;
376 struct sk_buff *skb;
377 unsigned int tail;
364 378
365 /*Mark all the buffer as used to avoid sending a lost buffer*/ 379 netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
366 for (i = 0; i < TX_RING_SIZE; i++) 380 bp->tx_tail, bp->tx_head);
367 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
368 381
369 /* Add wrap bit */ 382 /* Make sure nobody is trying to queue up new packets */
370 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 383 netif_stop_queue(bp->dev);
371 384
372 /* free transmit buffer in upper layer*/ 385 /*
373 for (tail = bp->tx_tail; tail != head; tail++) { 386 * Stop transmission now
374 struct macb_tx_skb *tx_skb; 387 * (in case we have just queued new packets)
375 struct sk_buff *skb; 388 */
389 if (macb_halt_tx(bp))
390 /* Just complain for now, reinitializing TX path can be good */
391 netdev_err(bp->dev, "BUG: halt tx timed out\n");
376 392
377 rmb(); 393 /* No need for the lock here as nobody will interrupt us anymore */
378 394
379 tx_skb = macb_tx_skb(bp, tail); 395 /*
380 skb = tx_skb->skb; 396 * Treat frames in TX queue including the ones that caused the error.
397 * Free transmit buffers in upper layer.
398 */
399 for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
400 struct macb_dma_desc *desc;
401 u32 ctrl;
381 402
382 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, 403 desc = macb_tx_desc(bp, tail);
383 skb->len, DMA_TO_DEVICE); 404 ctrl = desc->ctrl;
384 tx_skb->skb = NULL; 405 tx_skb = macb_tx_skb(bp, tail);
385 dev_kfree_skb_irq(skb); 406 skb = tx_skb->skb;
386 }
387 407
388 bp->tx_head = bp->tx_tail = 0; 408 if (ctrl & MACB_BIT(TX_USED)) {
409 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
410 macb_tx_ring_wrap(tail), skb->data);
411 bp->stats.tx_packets++;
412 bp->stats.tx_bytes += skb->len;
413 } else {
414 /*
415 * "Buffers exhausted mid-frame" errors may only happen
416 * if the driver is buggy, so complain loudly about those.
417 * Statistics are updated by hardware.
418 */
419 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
420 netdev_err(bp->dev,
421 "BUG: TX buffers exhausted mid-frame\n");
389 422
390 /* Enable the transmitter again */ 423 desc->ctrl = ctrl | MACB_BIT(TX_USED);
391 if (status & MACB_BIT(TGO)) 424 }
392 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); 425
426 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
427 DMA_TO_DEVICE);
428 tx_skb->skb = NULL;
429 dev_kfree_skb(skb);
393 } 430 }
394 431
395 if (!(status & MACB_BIT(COMP))) 432 /* Make descriptor updates visible to hardware */
396 /* 433 wmb();
397 * This may happen when a buffer becomes complete 434
398 * between reading the ISR and scanning the 435 /* Reinitialize the TX desc queue */
399 * descriptors. Nothing to worry about. 436 macb_writel(bp, TBQP, bp->tx_ring_dma);
400 */ 437 /* Make TX ring reflect state of hardware */
401 return; 438 bp->tx_head = bp->tx_tail = 0;
439
440 /* Now we are ready to start transmission again */
441 netif_wake_queue(bp->dev);
442
443 /* Housework before enabling TX IRQ */
444 macb_writel(bp, TSR, macb_readl(bp, TSR));
445 macb_writel(bp, IER, MACB_TX_INT_FLAGS);
446}
447
448static void macb_tx_interrupt(struct macb *bp)
449{
450 unsigned int tail;
451 unsigned int head;
452 u32 status;
453
454 status = macb_readl(bp, TSR);
455 macb_writel(bp, TSR, status);
456
457 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
458 (unsigned long)status);
402 459
403 head = bp->tx_head; 460 head = bp->tx_head;
404 for (tail = bp->tx_tail; tail != head; tail++) { 461 for (tail = bp->tx_tail; tail != head; tail++) {
@@ -638,9 +695,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
638 } 695 }
639 } 696 }
640 697
641 if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | 698 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
642 MACB_BIT(ISR_RLE))) 699 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
643 macb_tx(bp); 700 schedule_work(&bp->tx_error_task);
701 break;
702 }
703
704 if (status & MACB_BIT(TCOMP))
705 macb_tx_interrupt(bp);
644 706
645 /* 707 /*
646 * Link change detection isn't possible with RMII, so we'll 708 * Link change detection isn't possible with RMII, so we'll
@@ -970,13 +1032,8 @@ static void macb_init_hw(struct macb *bp)
970 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1032 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
971 1033
972 /* Enable interrupts */ 1034 /* Enable interrupts */
973 macb_writel(bp, IER, (MACB_BIT(RCOMP) 1035 macb_writel(bp, IER, (MACB_RX_INT_FLAGS
974 | MACB_BIT(RXUBR) 1036 | MACB_TX_INT_FLAGS
975 | MACB_BIT(ISR_TUND)
976 | MACB_BIT(ISR_RLE)
977 | MACB_BIT(TXERR)
978 | MACB_BIT(TCOMP)
979 | MACB_BIT(ISR_ROVR)
980 | MACB_BIT(HRESP))); 1037 | MACB_BIT(HRESP)));
981 1038
982} 1039}
@@ -1428,6 +1485,7 @@ static int __init macb_probe(struct platform_device *pdev)
1428 bp->dev = dev; 1485 bp->dev = dev;
1429 1486
1430 spin_lock_init(&bp->lock); 1487 spin_lock_init(&bp->lock);
1488 INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
1431 1489
1432 bp->pclk = clk_get(&pdev->dev, "pclk"); 1490 bp->pclk = clk_get(&pdev->dev, "pclk");
1433 if (IS_ERR(bp->pclk)) { 1491 if (IS_ERR(bp->pclk)) {
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 232dca6b5839..4235ab871ab4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -538,6 +538,7 @@ struct macb {
538 struct clk *hclk; 538 struct clk *hclk;
539 struct net_device *dev; 539 struct net_device *dev;
540 struct napi_struct napi; 540 struct napi_struct napi;
541 struct work_struct tx_error_task;
541 struct net_device_stats stats; 542 struct net_device_stats stats;
542 union { 543 union {
543 struct macb_stats macb; 544 struct macb_stats macb;