author | Divy Le Ray <divy@chelsio.com> | 2007-01-30 22:43:56 -0500
committer | Jeff Garzik <jeff@garzik.org> | 2007-02-05 16:58:50 -0500
commit | 6195c71d652d337521ec8431c0923a85d6aaaf71 (patch)
tree | e7da351c03745d4eacb9d2b48877850adadc6f2b /drivers
parent | 14ab989245069907622ab9fd930275c086cee069 (diff)
cxgb3 - remove SW Tx credits coalescing
Remove tx credit coalescing done in SW.
The HW is taking care of it already.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
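For readers skimming the diff below: after this change each SGE response returns its Tx completion credits to the queue-set counters immediately, instead of accumulating them in a local tx_completed[] array that was flushed once per poll loop. The stand-alone sketch below illustrates only that post-patch credit-return shape; the types, the txqN_cr() field extractors, and the sample flag values are simplified assumptions made for illustration and are not the real cxgb3 definitions.

```c
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the driver's queue bookkeeping. */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL, NTXQ };

struct sge_txq { unsigned int processed; };
struct sge_qset { struct sge_txq txq[NTXQ]; };

/* Hypothetical stand-ins for the G_RSPD_TXQ0/1/2_CR() bit-field extractors. */
static unsigned int txq0_cr(unsigned int flags) { return flags & 0x7f; }
static unsigned int txq1_cr(unsigned int flags) { return (flags >> 7) & 0x7f; }
static unsigned int txq2_cr(unsigned int flags) { return (flags >> 14) & 0x7f; }

/*
 * Post-patch shape of the credit handling: every response credits the
 * Tx queues directly, relying on the HW to coalesce credit updates.
 */
static void handle_rsp_cntrl_info(struct sge_qset *qs, unsigned int flags)
{
	unsigned int credits;

	credits = txq0_cr(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = txq2_cr(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

	credits = txq1_cr(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}

int main(void)
{
	struct sge_qset qs;

	memset(&qs, 0, sizeof(qs));

	/* Two made-up responses: 3 offload + 2 ETH credits, then 1 ctrl + 5 ETH. */
	handle_rsp_cntrl_info(&qs, (3u << 7) | 2u);
	handle_rsp_cntrl_info(&qs, (1u << 14) | 5u);

	printf("ETH=%u OFLD=%u CTRL=%u\n",
	       qs.txq[TXQ_ETH].processed,
	       qs.txq[TXQ_OFLD].processed,
	       qs.txq[TXQ_CTRL].processed);
	return 0;
}
```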
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/cxgb3/sge.c | 75
1 file changed, 14 insertions, 61 deletions
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index ccea06a04402..8b3c8246efba 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1550,33 +1550,6 @@ static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
 }
 
 /**
- * update_tx_completed - update the number of processed Tx descriptors
- * @qs: the queue set to update
- * @idx: which Tx queue within the set to update
- * @credits: number of new processed descriptors
- * @tx_completed: accumulates credits for the queues
- *
- * Updates the number of completed Tx descriptors for a queue set's Tx
- * queue. On UP systems we updated the information immediately but on
- * MP we accumulate the credits locally and update the Tx queue when we
- * reach a threshold to avoid cache-line bouncing.
- */
-static inline void update_tx_completed(struct sge_qset *qs, int idx,
-				       unsigned int credits,
-				       unsigned int tx_completed[])
-{
-#ifdef CONFIG_SMP
-	tx_completed[idx] += credits;
-	if (tx_completed[idx] > 32) {
-		qs->txq[idx].processed += tx_completed[idx];
-		tx_completed[idx] = 0;
-	}
-#else
-	qs->txq[idx].processed += credits;
-#endif
-}
-
-/**
  * restart_tx - check whether to restart suspended Tx queues
  * @qs: the queue set to resume
  *
@@ -1656,13 +1629,12 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
  * handle_rsp_cntrl_info - handles control information in a response
  * @qs: the queue set corresponding to the response
  * @flags: the response control flags
- * @tx_completed: accumulates completion credits for the Tx queues
  *
  * Handles the control information of an SGE response, such as GTS
  * indications and completion credits for the queue set's Tx queues.
+ * HW coalesces credits, we don't do any extra SW coalescing.
  */
-static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
-					  unsigned int tx_completed[])
+static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
 {
 	unsigned int credits;
 
@@ -1671,37 +1643,21 @@ static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
 #endif
 
-	/* ETH credits are already coalesced, return them immediately. */
 	credits = G_RSPD_TXQ0_CR(flags);
 	if (credits)
 		qs->txq[TXQ_ETH].processed += credits;
 
+	credits = G_RSPD_TXQ2_CR(flags);
+	if (credits)
+		qs->txq[TXQ_CTRL].processed += credits;
+
 # if USE_GTS
 	if (flags & F_RSPD_TXQ1_GTS)
 		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
 # endif
-	update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
-	update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
-}
-
-/**
- * flush_tx_completed - returns accumulated Tx completions to Tx queues
- * @qs: the queue set to update
- * @tx_completed: pending completion credits to return to Tx queues
- *
- * Updates the number of completed Tx descriptors for a queue set's Tx
- * queues with the credits pending in @tx_completed. This does something
- * only on MP systems as on UP systems we return the credits immediately.
- */
-static inline void flush_tx_completed(struct sge_qset *qs,
-				      unsigned int tx_completed[])
-{
-#if defined(CONFIG_SMP)
-	if (tx_completed[TXQ_OFLD])
-		qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
-	if (tx_completed[TXQ_CTRL])
-		qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
-#endif
-}
+	credits = G_RSPD_TXQ1_CR(flags);
+	if (credits)
+		qs->txq[TXQ_OFLD].processed += credits;
 }
 
 /**
@@ -1784,7 +1740,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 	struct sge_rspq *q = &qs->rspq;
 	struct rsp_desc *r = &q->desc[q->cidx];
 	int budget_left = budget;
-	unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
+	unsigned int sleeping = 0;
 	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
 	int ngathered = 0;
 
@@ -1837,7 +1793,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 
 		if (flags & RSPD_CTRL_MASK) {
 			sleeping |= flags & RSPD_GTS_MASK;
-			handle_rsp_cntrl_info(qs, flags, tx_completed);
+			handle_rsp_cntrl_info(qs, flags);
 		}
 
 		r++;
@@ -1868,7 +1824,6 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
 		--budget_left;
 	}
 
-	flush_tx_completed(qs, tx_completed);
 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
@@ -1953,7 +1908,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
 				  struct rsp_desc *r)
 {
 	struct sge_rspq *q = &qs->rspq;
-	unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
+	unsigned int sleeping = 0;
 
 	do {
 		u32 flags = ntohl(r->flags);
@@ -1968,7 +1923,7 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
 
 		if (flags & RSPD_CTRL_MASK) {
 			sleeping |= flags & RSPD_GTS_MASK;
-			handle_rsp_cntrl_info(qs, flags, tx_completed);
+			handle_rsp_cntrl_info(qs, flags);
 		}
 
 		q->pure_rsps++;
@@ -1978,8 +1933,6 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
 		}
 	} while (is_new_response(r, q) && is_pure_response(r));
 
-	flush_tx_completed(qs, tx_completed);
-
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
 
@@ -2630,7 +2583,7 @@ void t3_sge_init(struct adapter *adap, struct sge_params *p)
 		     V_LORCQDRBTHRSH(512));
 	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
 	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
-		     V_TIMEOUT(100 * core_ticks_per_usec(adap)));
+		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
 	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
 	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
 	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);