Diffstat (limited to 'drivers/net/chelsio/sge.c')
-rw-r--r--	drivers/net/chelsio/sge.c	70
1 file changed, 5 insertions, 65 deletions
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8a7efd38e95b..d6c7d2aa761b 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
-#ifdef CONFIG_CHELSIO_T1_NAPI
-		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-					 ntohs(p->vlan));
-#else
-		vlan_hwaccel_rx(skb, adapter->vlan_grp,
-				ntohs(p->vlan));
-#endif
-	} else {
-#ifdef CONFIG_CHELSIO_T1_NAPI
+		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+					 ntohs(p->vlan));
+	} else
 		netif_receive_skb(skb);
-#else
-		netif_rx(skb);
-#endif
-	}
 }
 
 /*
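With the CONFIG_CHELSIO_T1_NAPI conditionals gone, sge_rx() ends up with a single NAPI-context delivery path. The sketch below is a minimal, standalone restatement of that decision, not the driver's code: the example_deliver() wrapper and its parameters are hypothetical, and only the two delivery calls and the vlan_grp/vlan_valid test are taken from the hunk above (using the vlan_group API of this kernel era).

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/*
 * Hypothetical helper mirroring the surviving branch of sge_rx():
 * VLAN-accelerated receive when a vlan_group is registered and the
 * descriptor carries a valid tag, plain netif_receive_skb() otherwise.
 * Both calls assume NAPI (softirq) context, which is what made the old
 * netif_rx()/vlan_hwaccel_rx() branches removable.
 */
static void example_deliver(struct sk_buff *skb, struct vlan_group *grp,
			    __be16 vlan, int vlan_valid)
{
	if (grp && vlan_valid)
		vlan_hwaccel_receive_skb(skb, grp, ntohs(vlan));
	else
		netif_receive_skb(skb);
}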
@@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
 	return (e->GenerationBit == Q->genbit);
 }
 
-#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses. Such respones are too light-weight to justify
@@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-/*
- * NAPI version of the main interrupt handler.
- */
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
@@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
 			else {
 				/* no data, no NAPI needed */
 				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-				napi_enable(&adapter->napi);	/* undo schedule_prep */
+				/* undo schedule_prep */
+				napi_enable(&adapter->napi);
 			}
 		}
 		return IRQ_HANDLED;
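The napi_enable() added above undoes an earlier successful napi_schedule_prep() when the pure-response check finds nothing to poll for. As a reference, here is a condensed sketch of that prep/schedule/undo idiom with hypothetical example_* names; the real t1_interrupt() also performs the register writes and the slow-path handling visible in the surrounding hunks, which are left out here.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical adapter with an embedded NAPI instance, mirroring adapter->napi. */
struct example_adapter {
	struct napi_struct napi;
};

/* Stand-in for process_pure_responses(): nonzero means data responses remain. */
static int example_data_pending(struct example_adapter *adap)
{
	return 0;	/* this sketch never finds extra work */
}

static irqreturn_t example_intr(int irq, void *data)
{
	struct example_adapter *adap = data;

	if (napi_schedule_prep(&adap->napi)) {
		if (example_data_pending(adap))
			__napi_schedule(&adap->napi);	/* defer to the poll routine */
		else
			napi_enable(&adap->napi);	/* no data: undo schedule_prep */
	}
	return IRQ_HANDLED;
}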
@@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
 	return IRQ_RETVAL(handled != 0);
 }
 
-#else
-/*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- *    information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- *    outstanding TX buffers waiting, replenish RX buffers, potentially
- *    reenable upper layers if they were turned off due to lack of TX
- *    resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- *    let the slow_intr_handler run and do error handling.
- */
-irqreturn_t t1_interrupt(int irq, void *cookie)
-{
-	int work_done;
-	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;
-
-	spin_lock(&adapter->async_lock);
-
-	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-
-	if (likely(responses_pending(adapter)))
-		work_done = process_responses(adapter, -1);
-	else
-		work_done = t1_slow_intr_handler(adapter);
-
-	/*
-	 * The unconditional clearing of the PL_CAUSE above may have raced
-	 * with DMA completion and the corresponding generation of a response
-	 * to cause us to miss the resulting data interrupt. The next write
-	 * is also unconditional to recover the missed interrupt and render
-	 * this race harmless.
-	 */
-	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
-
-	if (!work_done)
-		adapter->sge->stats.unhandled_irqs++;
-	spin_unlock(&adapter->async_lock);
-	return IRQ_RETVAL(work_done != 0);
-}
-#endif
-
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
  *
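The block comment deleted above describes the inline (non-NAPI) flow: clear the cause register, process responses, then re-arm via A_SG_SLEEPING. With that handler gone, the same work is split between t1_interrupt() and the t1_poll() routine named in an earlier hunk header. t1_poll() itself is outside this diff, so the following is only a generic sketch of what such a poll routine usually looks like, with hypothetical example_* names; napi_complete() and the budget convention are standard NAPI API, and the comment marks where a device-specific re-arm (in this driver, the respQ.cidx write to A_SG_SLEEPING seen above) would sit.

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_adapter {
	struct napi_struct napi;
};

/* Stand-in for process_responses(): consume up to 'budget' response descriptors. */
static int example_process_responses(struct example_adapter *adap, int budget)
{
	return 0;	/* no work in this sketch */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adap =
		container_of(napi, struct example_adapter, napi);
	int work_done = example_process_responses(adap, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/*
		 * Re-arm the device interrupt here; for this driver that is
		 * the respQ.cidx write to A_SG_SLEEPING seen in the hunks above.
		 */
	}
	return work_done;
}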