author		Divy Le Ray <divy@chelsio.com>		2009-03-12 17:14:04 -0400
committer	David S. Miller <davem@davemloft.net>	2009-03-13 14:30:45 -0400
commit		42c8ea17e8f78752ed5a354791b0ea1697dc3480
tree		7edf15d2af436d532fdfd0c82a2a180f12886b8e
parent		b2b964f0647c5156038834dd879f90442e33f2a5
cxgb3: separate TX and RX reclaim handlers
Separate TX and RX reclaim handlers.
Don't disable interrupts in RX reclaim handler.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
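In outline, the patch splits the single sge_timer_cb() callback into two independent handlers. A minimal sketch of the new shape (bodies elided here; the real ones are in the sge.c hunks below):

/* Sketch of the split (bodies elided; see the sge.c hunks below).
 * Each handler re-arms its own timer, so Tx reclaim no longer shares
 * a callback, or a lock, with Rx replenishment. */
static void sge_timer_tx(unsigned long data)
{
	/* trylock each Tx queue, reclaim at most TX_RECLAIM_TIMER_CHUNK
	 * descriptors, re-arm with a period that shrinks under backlog */
}

static void sge_timer_rx(unsigned long data)
{
	/* trylock the response-queue lock, return coalesced response
	 * queue credits if starved, refill the free lists, re-arm */
}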
Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--	drivers/net/cxgb3/adapter.h	  1
-rw-r--r--	drivers/net/cxgb3/sge.c		128
2 files changed, 89 insertions, 40 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 95dce4832478..66ce456614a8 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -197,6 +197,7 @@ struct sge_qset { /* an SGE queue set */
 	struct netdev_queue *tx_q;	/* associated netdev TX queue */
 	unsigned long txq_stopped;	/* which Tx queues are stopped */
 	struct timer_list tx_reclaim_timer;	/* reclaims TX buffers */
+	struct timer_list rx_reclaim_timer;	/* reclaims RX buffers */
 	unsigned long port_stats[SGE_PSTAT_MAX];
 } ____cacheline_aligned;

diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a482429846eb..7d779d18e137 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -61,6 +61,7 @@
 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)

 #define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)

 /*
  * Max number of Rx buffers we replenish at a time.
@@ -71,6 +72,8 @@
  * frequently as Tx buffers are usually reclaimed by new Tx packets.
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U

 /* WR size in bytes */
 #define WR_LEN (WR_FLITS * 8)
@@ -308,21 +311,25 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
  *	reclaim_completed_tx - reclaims completed Tx descriptors
  *	@adapter: the adapter
  *	@q: the Tx queue to reclaim completed descriptors from
+ *	@chunk: maximum number of descriptors to reclaim
  *
  *	Reclaims Tx descriptors that the SGE has indicated it has processed,
  *	and frees the associated buffers if possible.  Called with the Tx
  *	queue's lock held.
  */
-static inline void reclaim_completed_tx(struct adapter *adapter,
-					struct sge_txq *q)
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+						struct sge_txq *q,
+						unsigned int chunk)
 {
 	unsigned int reclaim = q->processed - q->cleaned;

+	reclaim = min(chunk, reclaim);
 	if (reclaim) {
 		free_tx_desc(adapter, q, reclaim);
 		q->cleaned += reclaim;
 		q->in_use -= reclaim;
 	}
+	return q->processed - q->cleaned;
 }

 /**
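The new chunk argument is the point of this hunk: callers bound how much work a single reclaim pass may do (16 descriptors on the hot xmit path, 64 from the timer), and the return value reports how many completed descriptors remain. A minimal, runnable userspace model of that contract (the struct and names are illustrative stand-ins, not driver code):

/* Userspace model of the chunked-reclaim contract introduced above;
 * 'txq_model' is an illustrative stand-in for struct sge_txq. */
#include <stdio.h>

struct txq_model {
	unsigned int processed;	/* descriptors the SGE has completed */
	unsigned int cleaned;	/* descriptors the driver has freed */
};

/* Free at most 'chunk' completed descriptors; report what remains. */
static unsigned int reclaim_model(struct txq_model *q, unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim > chunk)
		reclaim = chunk;
	q->cleaned += reclaim;		/* stands in for free_tx_desc() */
	return q->processed - q->cleaned;
}

int main(void)
{
	struct txq_model q = { .processed = 100, .cleaned = 0 };

	/* Hot-path chunk (16): bounded latency per transmitted packet. */
	printf("left after xmit-path pass: %u\n", reclaim_model(&q, 16));
	/* Timer chunk (64): catches up when the queue goes idle. */
	printf("left after timer pass:     %u\n", reclaim_model(&q, 64));
	return 0;
}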
@@ -601,6 +608,7 @@ static void t3_reset_qset(struct sge_qset *q)
 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
 	q->txq_stopped = 0;
 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
+	q->rx_reclaim_timer.function = NULL;
 	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
 }

@@ -1179,7 +1187,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	txq = netdev_get_tx_queue(dev, qidx);

 	spin_lock(&q->lock);
-	reclaim_completed_tx(adap, q);
+	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

 	credits = q->size - q->in_use;
 	ndesc = calc_tx_descs(skb);
@@ -1588,7 +1596,7 @@ static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
 	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

 	spin_lock(&q->lock);
-      again:reclaim_completed_tx(adap, q);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

 	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
 	if (unlikely(ret)) {
@@ -1630,7 +1638,7 @@ static void restart_offloadq(unsigned long data)
 	struct adapter *adap = pi->adapter;

 	spin_lock(&q->lock);
-      again:reclaim_completed_tx(adap, q);
+again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

 	while ((skb = skb_peek(&q->sendq)) != NULL) {
 		unsigned int gen, pidx;
@@ -2747,13 +2755,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 }

 /**
- *	sge_timer_cb - perform periodic maintenance of an SGE qset
+ *	sge_timer_tx - perform periodic maintenance of an SGE qset
  *	@data: the SGE queue set to maintain
  *
  *	Runs periodically from a timer to perform maintenance of an SGE queue
  *	set.  It performs two tasks:
  *
- *	a) Cleans up any completed Tx descriptors that may still be pending.
+ *	Cleans up any completed Tx descriptors that may still be pending.
  *	Normal descriptor cleanup happens when new packets are added to a Tx
  *	queue so this timer is relatively infrequent and does any cleanup only
  *	if the Tx queue has not seen any new packets in a while.  We make a
@@ -2763,51 +2771,87 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
  *	up).  Since control queues use immediate data exclusively we don't
  *	bother cleaning them up here.
  *
- *	b) Replenishes Rx queues that have run out due to memory shortage.
- *	Normally new Rx buffers are added when existing ones are consumed but
- *	when out of memory a queue can become empty.  We try to add only a few
- *	buffers here, the queue will be replenished fully as these new buffers
- *	are used up if memory shortage has subsided.
  */
-static void sge_timer_cb(unsigned long data)
+static void sge_timer_tx(unsigned long data)
 {
-	spinlock_t *lock;
 	struct sge_qset *qs = (struct sge_qset *)data;
-	struct adapter *adap = qs->adap;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+	unsigned long next_period;

 	if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
-		reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+						    TX_RECLAIM_TIMER_CHUNK);
 		spin_unlock(&qs->txq[TXQ_ETH].lock);
 	}
 	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
-		reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
+		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+						     TX_RECLAIM_TIMER_CHUNK);
 		spin_unlock(&qs->txq[TXQ_OFLD].lock);
 	}
-	lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
-					    &adap->sge.qs[0].rspq.lock;
-	if (spin_trylock_irq(lock)) {
-		if (!napi_is_scheduled(&qs->napi)) {
-			u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
-
-			if (qs->fl[0].credits < qs->fl[0].size)
-				__refill_fl(adap, &qs->fl[0]);
-			if (qs->fl[1].credits < qs->fl[1].size)
-				__refill_fl(adap, &qs->fl[1]);
-
-			if (status & (1 << qs->rspq.cntxt_id)) {
-				qs->rspq.starved++;
-				if (qs->rspq.credits) {
-					refill_rspq(adap, &qs->rspq, 1);
-					qs->rspq.credits--;
-					qs->rspq.restarted++;
-					t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
-						     1 << qs->rspq.cntxt_id);
-				}
+
+	next_period = TX_RECLAIM_PERIOD >>
+		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+		      TX_RECLAIM_TIMER_CHUNK);
+	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
+/*
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty.  We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Return coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+	spinlock_t *lock;
+	struct sge_qset *qs = (struct sge_qset *)data;
+	struct port_info *pi = netdev_priv(qs->netdev);
+	struct adapter *adap = pi->adapter;
+	u32 status;
+
+	lock = adap->params.rev > 0 ?
+	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+	if (!spin_trylock_irq(lock))
+		goto out;
+
+	if (napi_is_scheduled(&qs->napi))
+		goto unlock;
+
+	if (adap->params.rev < 4) {
+		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+		if (status & (1 << qs->rspq.cntxt_id)) {
+			qs->rspq.starved++;
+			if (qs->rspq.credits) {
+				qs->rspq.credits--;
+				refill_rspq(adap, &qs->rspq, 1);
+				qs->rspq.restarted++;
+				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+					     1 << qs->rspq.cntxt_id);
 			}
 		}
-		spin_unlock_irq(lock);
 	}
-	mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+	if (qs->fl[0].credits < qs->fl[0].size)
+		__refill_fl(adap, &qs->fl[0]);
+	if (qs->fl[1].credits < qs->fl[1].size)
+		__refill_fl(adap, &qs->fl[1]);
+
+unlock:
+	spin_unlock_irq(lock);
+out:
+	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
 }

 /**
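The next_period computation in sge_timer_tx() above is the adaptive piece: every 64 descriptors still pending after a chunked reclaim shifts the base period right once more, so a backlogged queue is revisited sooner. A standalone check of the arithmetic (HZ pinned to 1000 purely to make the numbers concrete):

/* Worked example of the Tx timer back-off, using the patch's values:
 * TX_RECLAIM_PERIOD = HZ / 4, TX_RECLAIM_TIMER_CHUNK = 64. */
#include <stdio.h>

#define HZ 1000u
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64u

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* tbd_* mirror the descriptors still to be done, as returned by
	 * reclaim_completed_tx() for the ETH and OFLD queues. */
	unsigned int tbd_eth = 64, tbd_ofld = 0;
	unsigned long next_period = TX_RECLAIM_PERIOD >>
				    (max_u(tbd_eth, tbd_ofld) /
				     TX_RECLAIM_TIMER_CHUNK);

	/* 0..63 pending keeps HZ/4 (250 jiffies); 64..127 halves it to
	 * HZ/8 (125); 128..191 to HZ/16; and so on. */
	printf("next_period = %lu jiffies\n", next_period);	/* 125 */
	return 0;
}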
@@ -2850,7 +2894,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	struct sge_qset *q = &adapter->sge.qs[id];

 	init_qset_cntxt(q, id);
-	setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
+	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);

 	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
 				   sizeof(struct rx_desc),
@@ -2999,6 +3044,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 			     V_NEWTIMER(q->rspq.holdoff_tmr));

 	mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+	mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);

 	return 0;

@@ -3024,6 +3070,8 @@ void t3_stop_sge_timers(struct adapter *adap)

 		if (q->tx_reclaim_timer.function)
 			del_timer_sync(&q->tx_reclaim_timer);
+		if (q->rx_reclaim_timer.function)
+			del_timer_sync(&q->rx_reclaim_timer);
 	}
 }

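One pattern worth noting across the last three hunks: timer_list.function doubles as an "initialized" flag. t3_reset_qset() clears it, and t3_stop_sge_timers() only calls del_timer_sync() when it is still set. A tiny runnable model of that guard (model_timer is a stand-in for struct timer_list; not driver code):

/* Userspace model of the NULL-function guard used by t3_reset_qset()
 * and t3_stop_sge_timers(). */
#include <stdio.h>

struct model_timer {
	void (*function)(unsigned long);
	unsigned long data;
};

static void sge_timer_rx_model(unsigned long data)
{
	printf("rx reclaim tick for qset %lu\n", data);
}

/* Mirrors t3_stop_sge_timers(): tear down only timers that are
 * still set up; a reset qset has already cleared .function. */
static void stop_timer_model(struct model_timer *t)
{
	if (t->function) {
		printf("del_timer_sync() equivalent for qset %lu\n", t->data);
		t->function = NULL;
	}
}

int main(void)
{
	struct model_timer armed = { sge_timer_rx_model, 0 };
	struct model_timer reset = { NULL, 1 };	/* as after t3_reset_qset() */

	stop_timer_model(&armed);	/* runs the teardown */
	stop_timer_model(&reset);	/* correctly skipped */
	return 0;
}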