Diffstat (limited to 'drivers/net/ethoc.c')
-rw-r--r--	drivers/net/ethoc.c	76
1 file changed, 49 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a12a07ea02b6..43431ffcf6c1 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -495,29 +495,42 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
 	return 0;
 }
 
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
 {
 	struct ethoc *priv = netdev_priv(dev);
+	int count;
+	struct ethoc_bd bd;
 
-	spin_lock(&priv->lock);
+	for (count = 0; count < limit; ++count) {
+		unsigned int entry;
 
-	while (priv->dty_tx != priv->cur_tx) {
-		unsigned int entry = priv->dty_tx % priv->num_tx;
-		struct ethoc_bd bd;
+		entry = priv->dty_tx % priv->num_tx;
 
 		ethoc_read_bd(priv, entry, &bd);
-		if (bd.stat & TX_BD_READY)
-			break;
 
-		entry = (++priv->dty_tx) % priv->num_tx;
+		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+			ethoc_ack_irq(priv, INT_MASK_TX);
+			/* If interrupt came in between reading in the BD
+			 * and clearing the interrupt source, then we risk
+			 * missing the event as the TX interrupt won't trigger
+			 * right away when we reenable it; hence, check
+			 * BD_EMPTY here again to make sure there isn't such an
+			 * event pending...
+			 */
+			ethoc_read_bd(priv, entry, &bd);
+			if (bd.stat & TX_BD_READY ||
+			    (priv->dty_tx == priv->cur_tx))
+				break;
+		}
+
 		(void)ethoc_update_tx_stats(priv, &bd);
+		priv->dty_tx++;
 	}
 
 	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
 		netif_wake_queue(dev);
 
-	ethoc_ack_irq(priv, INT_MASK_TX);
-	spin_unlock(&priv->lock);
+	return count;
 }
 
 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
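Note on the new ethoc_tx(): the ack-then-recheck dance above closes a race where a TX completion fires between reading the buffer descriptor and clearing the interrupt source; such an event would otherwise be lost once the interrupt is re-enabled. Below is a minimal standalone sketch of that pattern, assuming a made-up tx_ring structure and stub helpers (reap_tx, ack_tx_irq and friends are hypothetical illustrations, not ethoc driver API):

/* Illustrative only: the "ack, then look again" reclaim loop. */
#include <stdbool.h>

#define RING_SIZE 16

struct tx_ring {
	unsigned int cur;	/* producer index (cur_tx analogue) */
	unsigned int dty;	/* consumer index (dty_tx analogue) */
	bool busy[RING_SIZE];	/* stand-in for the TX_BD_READY bit */
};

static void ack_tx_irq(void) { }	/* stand-in for ethoc_ack_irq() */

static bool nothing_to_reap(const struct tx_ring *r)
{
	/* Slot still owned by hardware, or ring already empty. */
	return r->busy[r->dty % RING_SIZE] || (r->dty == r->cur);
}

static int reap_tx(struct tx_ring *r, int limit)
{
	int count;

	for (count = 0; count < limit; count++) {
		if (nothing_to_reap(r)) {
			/* Ack first, then re-check: a completion landing
			 * between the check and the ack must not be lost
			 * when the interrupt is re-enabled later. */
			ack_tx_irq();
			if (nothing_to_reap(r))
				break;
		}
		r->dty++;	/* reclaim one completed slot */
	}
	return count;
}

Returning the reclaim count (rather than void, as before) is what lets the NAPI poll routine further down weigh TX work against its budget.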
@@ -525,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 	struct net_device *dev = dev_id;
 	struct ethoc *priv = netdev_priv(dev);
 	u32 pending;
+	u32 mask;
 
-	ethoc_disable_irq(priv, INT_MASK_ALL);
+	/* Figure out what triggered the interrupt...
+	 * The tricky bit here is that the interrupt source bits get
+	 * set in INT_SOURCE for an event regardless of whether that
+	 * event is masked or not. Thus, in order to figure out what
+	 * triggered the interrupt, we need to remove the sources
+	 * for all events that are currently masked. This behaviour
+	 * is not particularly well documented but reasonable...
+	 */
+	mask = ethoc_read(priv, INT_MASK);
 	pending = ethoc_read(priv, INT_SOURCE);
+	pending &= mask;
+
 	if (unlikely(pending == 0)) {
-		ethoc_enable_irq(priv, INT_MASK_ALL);
 		return IRQ_NONE;
 	}
 
 	ethoc_ack_irq(priv, pending);
 
+	/* We always handle the dropped packet interrupt */
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
 		dev->stats.rx_dropped++;
 	}
 
-	if (pending & INT_MASK_RX) {
-		if (napi_schedule_prep(&priv->napi))
-			__napi_schedule(&priv->napi);
-	} else {
-		ethoc_enable_irq(priv, INT_MASK_RX);
+	/* Handle receive/transmit event by switching to polling */
+	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+		napi_schedule(&priv->napi);
 	}
 
-	if (pending & INT_MASK_TX)
-		ethoc_tx(dev);
-
-	ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
 	return IRQ_HANDLED;
 }
 
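Note on the interrupt handler: because INT_SOURCE latches events even while they are masked, the raw source register alone cannot tell you what raised the IRQ line; it has to be filtered through INT_MASK first. A minimal sketch of that filtering and of the switch to polled mode follows, using invented register fields, event bits, and helpers (fake_dev, EVT_*, mask_events, schedule_poll are hypothetical stand-ins):

/* Illustrative only: mask-filtered IRQ dispatch. */
#include <stdint.h>
#include <stdbool.h>

#define EVT_TX	(1u << 0)
#define EVT_RX	(1u << 1)

struct fake_dev {
	uint32_t int_source;	/* latches events even when masked */
	uint32_t int_mask;	/* 1 = event may raise the IRQ line */
};

static void mask_events(struct fake_dev *d, uint32_t evts)
{
	d->int_mask &= ~evts;
}

static void schedule_poll(void) { }	/* napi_schedule() analogue */

static bool handle_irq(struct fake_dev *d)
{
	/* Only unmasked, latched events can have caused this IRQ. */
	uint32_t pending = d->int_source & d->int_mask;

	if (pending == 0)
		return false;	/* not ours: IRQ_NONE analogue */

	if (pending & (EVT_TX | EVT_RX)) {
		/* Switch to polling: mask both directions, then let
		 * the poll routine drain the rings with IRQs off. */
		mask_events(d, EVT_TX | EVT_RX);
		schedule_poll();
	}
	return true;		/* IRQ_HANDLED analogue */
}

Masking TX and RX before scheduling the poller is the usual NAPI handshake; the poll routine re-enables both only once the rings are drained.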
@@ -576,15 +595,18 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
 static int ethoc_poll(struct napi_struct *napi, int budget)
 {
 	struct ethoc *priv = container_of(napi, struct ethoc, napi);
-	int work_done = 0;
+	int rx_work_done = 0;
+	int tx_work_done = 0;
+
+	rx_work_done = ethoc_rx(priv->netdev, budget);
+	tx_work_done = ethoc_tx(priv->netdev, budget);
 
-	work_done = ethoc_rx(priv->netdev, budget);
-	if (work_done < budget) {
+	if (rx_work_done < budget && tx_work_done < budget) {
 		napi_complete(napi);
-		ethoc_enable_irq(priv, INT_MASK_RX);
+		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
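Note on ethoc_poll(): the poll routine now reaps both directions against the same budget and leaves polled mode only when neither direction exhausted it, i.e. both rings are drained. A minimal sketch of that completion rule, with hypothetical stand-ins for the ethoc and NAPI calls:

/* Illustrative only: dual RX/TX poll sharing one budget. */
static int process_rx(int budget) { (void)budget; return 0; }	/* ethoc_rx() stand-in */
static int process_tx(int budget) { (void)budget; return 0; }	/* ethoc_tx() stand-in */
static void polling_done(void) { }	/* napi_complete() stand-in */
static void unmask_tx_rx(void) { }	/* ethoc_enable_irq() stand-in */

static int poll(int budget)
{
	int rx_done = process_rx(budget);
	int tx_done = process_tx(budget);

	/* Re-arm interrupts only when both directions came in under
	 * budget; otherwise the framework will call poll() again. */
	if (rx_done < budget && tx_done < budget) {
		polling_done();
		unmask_tx_rx();
	}

	/* Only RX work is reported against the budget. */
	return rx_done;
}

Returning rx_work_done rather than the TX count matches the usual NAPI convention: the poll return value is compared against the budget, and the budget meters RX-side work.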