author    Tom Herbert <therbert@google.com>    2011-11-28 11:33:43 -0500
committer David S. Miller <davem@davemloft.net>    2011-11-29 12:46:20 -0500
commit    c3940999b29ca7d6ad9b37b827a058c90fd51992
tree      b066cae3d9c3dbc2b215982d4eec8e04c8e4bbb0 /drivers/net/ethernet/sfc
parent    2df1a70aaf70e8dff11b89b938a5f317556ee640
sfc: Support for byte queue limits
Changes to sfc to use byte queue limits.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
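
[Editor's note: byte queue limits (BQL) tracks the bytes posted to a hardware TX queue but not yet completed, and uses that count to dynamically bound queue depth and reduce latency. The patch below follows the standard three-call BQL contract: netdev_tx_sent_queue() when descriptors are queued, netdev_tx_completed_queue() when completions are reaped, and netdev_tx_reset_queue() when the ring is flushed. A minimal sketch of that same pattern in a generic driver is shown here for reference; the foo_* names are hypothetical and are not part of sfc.]

    /* Sketch of the BQL accounting pattern; foo_* is a hypothetical driver. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            /* ... map buffers and post descriptors to hardware ... */

            /* Tell BQL how many bytes went in flight; it may stop the queue. */
            netdev_tx_sent_queue(txq, skb->len);
            return NETDEV_TX_OK;
    }

    static void foo_tx_complete(struct net_device *dev,
                                unsigned int pkts, unsigned int bytes)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            /* Report completed work; BQL adjusts its limit and may wake the queue. */
            netdev_tx_completed_queue(txq, pkts, bytes);
    }

    static void foo_tx_flush(struct net_device *dev)
    {
            /* On ring teardown, clear BQL state so the counters stay consistent. */
            netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
    }

[Sent and completed byte counts must balance over time, which is why the sfc changes thread pkts_compl/bytes_compl through every path that frees a TX buffer.]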
Diffstat (limited to 'drivers/net/ethernet/sfc')
 drivers/net/ethernet/sfc/tx.c | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c5430f95..ab4c63570023 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                               struct efx_tx_buffer *buffer)
+                               struct efx_tx_buffer *buffer,
+                               unsigned int *pkts_compl,
+                               unsigned int *bytes_compl)
 {
         if (buffer->unmap_len) {
                 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
         }
 
         if (buffer->skb) {
+                (*pkts_compl)++;
+                (*bytes_compl) += buffer->skb->len;
                 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                 buffer->skb = NULL;
                 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
         buffer->skb = skb;
         buffer->continuation = false;
 
+        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
         /* Pass off to hardware */
         efx_nic_push_buffers(tx_queue);
 
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  unwind:
         /* Work backwards until we hit the original insert pointer value */
         while (tx_queue->insert_count != tx_queue->write_count) {
+                unsigned int pkts_compl = 0, bytes_compl = 0;
                 --tx_queue->insert_count;
                 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                 buffer = &tx_queue->buffer[insert_ptr];
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                 buffer->len = 0;
         }
 
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  * specified index.
  */
 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                                unsigned int index)
+                                unsigned int index,
+                                unsigned int *pkts_compl,
+                                unsigned int *bytes_compl)
 {
         struct efx_nic *efx = tx_queue->efx;
         unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                         return;
                 }
 
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                 buffer->continuation = true;
                 buffer->len = 0;
 
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
         unsigned fill_level;
         struct efx_nic *efx = tx_queue->efx;
+        unsigned int pkts_compl = 0, bytes_compl = 0;
 
         EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
-        efx_dequeue_buffers(tx_queue, index);
+        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+        netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
         /* See if we need to restart the netif queue. This barrier
          * separates the update of read_count from the test of the
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
         /* Free any buffers left in the ring */
         while (tx_queue->read_count != tx_queue->write_count) {
+                unsigned int pkts_compl = 0, bytes_compl = 0;
                 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-                efx_dequeue_buffer(tx_queue, buffer);
+                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                 buffer->continuation = true;
                 buffer->len = 0;
 
                 ++tx_queue->read_count;
         }
+        netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1163,6 +1176,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
         /* Pass off to hardware */
         efx_nic_push_buffers(tx_queue);
 
+        netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
         tx_queue->tso_bursts++;
         return NETDEV_TX_OK;
 