author	Mintz, Yuval <Yuval.Mintz@cavium.com>	2017-01-01 06:57:04 -0500
committer	David S. Miller <davem@davemloft.net>	2017-01-01 21:02:14 -0500
commit	e3eef7ee0201dbe5f4fc011b58d26228b57736ce (patch)
tree	6d4c2f2cc6cab6bd563ffc3a3f10d4f5827b82c2	/drivers/net/ethernet/qlogic/qede/qede.h
parent	e1d32acbcbd35af5264acc70ff03bf8da9e447a8 (diff)
qede: Postpone reallocation until NAPI end
During the Rx flow, the driver allocates a replacement buffer each time it consumes an Rx buffer. If that allocation fails, it instead re-posts the currently processed buffer back on the ring. As a result, the Rx ring is always completely full [from the driver's POV].

We now allow the Rx ring to shorten by doing the re-allocations at the end of the NAPI run. The only limitation is that each time we reallocate, we still want to make sure the Rx ring retains sufficient elements to guarantee that FW would be able to post additional data and trigger an interrupt.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
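As a rough illustration of the scheme above (a sketch under assumed names, not the driver's verbatim code), the allocator can skip the immediate allocation when it is called lazily from the Rx fast path, as long as the ring still holds enough filled buffers for FW to post data and raise an interrupt. All identifiers and the threshold below are simplified stand-ins for the real qede_alloc_rx_buffer() flow:

/* Simplified, self-contained sketch of the lazy-allocation idea; the real
 * driver operates on struct qede_rx_queue and DMA-mapped pages.
 */
#include <stdbool.h>

struct rxq_sketch {
	unsigned short num_rx_buffers;	/* nominal ring size chosen at load time */
	unsigned short filled_buffers;	/* buffers currently posted to the ring */
};

/* Assumed minimum fill level that still lets FW post data and trigger an
 * interrupt; the exact value is an implementation detail of the driver.
 */
#define RX_MIN_FILLED	16

static int alloc_rx_buffer_sketch(struct rxq_sketch *rxq, bool allow_lazy)
{
	/* Lazy path: defer the real allocation to the end of the NAPI run as
	 * long as the ring is comfortably above the minimum fill level.
	 */
	if (allow_lazy && rxq->filled_buffers > RX_MIN_FILLED) {
		rxq->filled_buffers--;
		return 0;
	}

	/* Non-lazy path: allocate and post a replacement buffer right away
	 * (page allocation and DMA mapping omitted from this sketch).
	 */
	rxq->filled_buffers++;
	return 0;
}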
Diffstat (limited to 'drivers/net/ethernet/qlogic/qede/qede.h')
-rw-r--r--	drivers/net/ethernet/qlogic/qede/qede.h	7
1 file changed, 5 insertions, 2 deletions
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index ab49263c9d43..1c5aac4b6139 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -280,7 +280,7 @@ struct qede_rx_queue {
 	u16 sw_rx_cons;
 	u16 sw_rx_prod;
 
-	u16 num_rx_buffers; /* Slowpath */
+	u16 filled_buffers;
 	u8 data_direction;
 	u8 rxq_id;
 
@@ -293,6 +293,9 @@ struct qede_rx_queue {
 	struct qed_chain rx_bd_ring;
 	struct qed_chain rx_comp_ring ____cacheline_aligned;
 
+	/* Used once per each NAPI run */
+	u16 num_rx_buffers;
+
 	/* GRO */
 	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
 
@@ -414,7 +417,7 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
 void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
-int qede_alloc_rx_buffer(struct qede_rx_queue *rxq);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
 int qede_free_tx_pkt(struct qede_dev *edev,
 		     struct qede_tx_queue *txq, int *len);
 int qede_poll(struct napi_struct *napi, int budget);
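For completeness, a similarly hedged sketch of the end-of-NAPI refill this patch enables, reusing the stand-in types from the sketch above. It mirrors the role of the two u16 counters kept in struct qede_rx_queue by this patch (num_rx_buffers for the nominal ring size, filled_buffers for the current fill level), but is not the driver's actual refill code:

/* Sketch only: top the ring back up once NAPI processing is done, so the
 * ring may shorten during the run but returns to its nominal size after.
 * Reuses struct rxq_sketch and alloc_rx_buffer_sketch() from the earlier
 * sketch; the real driver refills struct qede_rx_queue instead.
 */
static void refill_rx_ring_sketch(struct rxq_sketch *rxq)
{
	while (rxq->filled_buffers < rxq->num_rx_buffers)
		alloc_rx_buffer_sketch(rxq, false);	/* non-lazy: always post a buffer */
}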