aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/broadcom
diff options
context:
space:
mode:
authorTom Herbert <therbert@google.com>2011-11-28 11:33:37 -0500
committerDavid S. Miller <davem@davemloft.net>2011-11-29 12:46:20 -0500
commit2df1a70aaf70e8dff11b89b938a5f317556ee640 (patch)
tree0215865d1acd5fd0eb67badc9394abf6cee0deaa /drivers/net/ethernet/broadcom
parent298376d3e8f00147548c426959ce79efc47b669a (diff)
bnx2x: Support for byte queue limits
Changes to bnx2x to use byte queue limits. Signed-off-by: Tom Herbert <therbert@google.com> Acked-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8336c784db49..42ce56617042 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -102,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
102 * return idx of last bd freed 102 * return idx of last bd freed
103 */ 103 */
104static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, 104static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
105 u16 idx) 105 u16 idx, unsigned int *pkts_compl,
106 unsigned int *bytes_compl)
106{ 107{
107 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; 108 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
108 struct eth_tx_start_bd *tx_start_bd; 109 struct eth_tx_start_bd *tx_start_bd;
@@ -159,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
159 160
160 /* release skb */ 161 /* release skb */
161 WARN_ON(!skb); 162 WARN_ON(!skb);
163 if (skb) {
164 (*pkts_compl)++;
165 (*bytes_compl) += skb->len;
166 }
162 dev_kfree_skb_any(skb); 167 dev_kfree_skb_any(skb);
163 tx_buf->first_bd = 0; 168 tx_buf->first_bd = 0;
164 tx_buf->skb = NULL; 169 tx_buf->skb = NULL;
@@ -170,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
170{ 175{
171 struct netdev_queue *txq; 176 struct netdev_queue *txq;
172 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; 177 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
178 unsigned int pkts_compl = 0, bytes_compl = 0;
173 179
174#ifdef BNX2X_STOP_ON_ERROR 180#ifdef BNX2X_STOP_ON_ERROR
175 if (unlikely(bp->panic)) 181 if (unlikely(bp->panic))
@@ -189,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
189 " pkt_cons %u\n", 195 " pkt_cons %u\n",
190 txdata->txq_index, hw_cons, sw_cons, pkt_cons); 196 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
191 197
192 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); 198 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
199 &pkts_compl, &bytes_compl);
200
193 sw_cons++; 201 sw_cons++;
194 } 202 }
195 203
204 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
205
196 txdata->tx_pkt_cons = sw_cons; 206 txdata->tx_pkt_cons = sw_cons;
197 txdata->tx_bd_cons = bd_cons; 207 txdata->tx_bd_cons = bd_cons;
198 208
@@ -1077,14 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1077 struct bnx2x_fastpath *fp = &bp->fp[i]; 1087 struct bnx2x_fastpath *fp = &bp->fp[i];
1078 for_each_cos_in_tx_queue(fp, cos) { 1088 for_each_cos_in_tx_queue(fp, cos) {
1079 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 1089 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1090 unsigned pkts_compl = 0, bytes_compl = 0;
1080 1091
1081 u16 sw_prod = txdata->tx_pkt_prod; 1092 u16 sw_prod = txdata->tx_pkt_prod;
1082 u16 sw_cons = txdata->tx_pkt_cons; 1093 u16 sw_cons = txdata->tx_pkt_cons;
1083 1094
1084 while (sw_cons != sw_prod) { 1095 while (sw_cons != sw_prod) {
1085 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons)); 1096 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1097 &pkts_compl, &bytes_compl);
1086 sw_cons++; 1098 sw_cons++;
1087 } 1099 }
1100 netdev_tx_reset_queue(
1101 netdev_get_tx_queue(bp->dev, txdata->txq_index));
1088 } 1102 }
1089 } 1103 }
1090} 1104}
@@ -2788,6 +2802,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2788 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, 2802 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2789 skb_frag_size(frag), DMA_TO_DEVICE); 2803 skb_frag_size(frag), DMA_TO_DEVICE);
2790 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 2804 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2805 unsigned int pkts_compl = 0, bytes_compl = 0;
2791 2806
2792 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " 2807 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2793 "dropping packet...\n"); 2808 "dropping packet...\n");
@@ -2799,7 +2814,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2799 */ 2814 */
2800 first_bd->nbd = cpu_to_le16(nbd); 2815 first_bd->nbd = cpu_to_le16(nbd);
2801 bnx2x_free_tx_pkt(bp, txdata, 2816 bnx2x_free_tx_pkt(bp, txdata,
2802 TX_BD(txdata->tx_pkt_prod)); 2817 TX_BD(txdata->tx_pkt_prod),
2818 &pkts_compl, &bytes_compl);
2803 return NETDEV_TX_OK; 2819 return NETDEV_TX_OK;
2804 } 2820 }
2805 2821
@@ -2860,6 +2876,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2860 pbd_e2->parsing_data); 2876 pbd_e2->parsing_data);
2861 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2877 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2862 2878
2879 netdev_tx_sent_queue(txq, skb->len);
2880
2863 txdata->tx_pkt_prod++; 2881 txdata->tx_pkt_prod++;
2864 /* 2882 /*
2865 * Make sure that the BD data is updated before updating the producer 2883 * Make sure that the BD data is updated before updating the producer