diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2008-09-01 07:44:59 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-09-03 09:53:42 -0400 |
commit | 60ac10658c2e234cf7bc27e0930e324c6c6fcf61 (patch) | |
tree | b16ee9efd385806755c6912492db43997c4f4b98 /drivers/net/sfc/tx.c | |
parent | 26c086771a8ad0a1a72699674fa712fe6aeacb02 (diff) |
sfc: Use separate hardware TX queues to select checksum generation
Checksum generation is an attribute of our hardware TX queues, not TX
descriptors. We previously used a single queue and turned checksum
generation on or off as requested through ethtool. However, this can
result in regenerating checksums in raw packets that should not be
modified. We now create 2 hardware TX queues, one with checksum
generation on and one with it off. They are presented to the net core
as one queue since it does not know how to select between them.
The self-test verifies that a bad checksum is unaltered on the queue
with checksum generation off.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/sfc/tx.c')
-rw-r--r-- | drivers/net/sfc/tx.c | 23 |
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 2a09101f67eb..e5e0bab313eb 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -368,7 +368,14 @@ inline int efx_xmit(struct efx_nic *efx, | |||
368 | int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | 368 | int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) |
369 | { | 369 | { |
370 | struct efx_nic *efx = netdev_priv(net_dev); | 370 | struct efx_nic *efx = netdev_priv(net_dev); |
371 | return efx_xmit(efx, &efx->tx_queue[0], skb); | 371 | struct efx_tx_queue *tx_queue; |
372 | |||
373 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) | ||
374 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM]; | ||
375 | else | ||
376 | tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; | ||
377 | |||
378 | return efx_xmit(efx, tx_queue, skb); | ||
372 | } | 379 | } |
373 | 380 | ||
374 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | 381 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) |
@@ -412,26 +419,21 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | |||
412 | /* Allocate software ring */ | 419 | /* Allocate software ring */ |
413 | txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); | 420 | txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); |
414 | tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); | 421 | tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); |
415 | if (!tx_queue->buffer) { | 422 | if (!tx_queue->buffer) |
416 | rc = -ENOMEM; | 423 | return -ENOMEM; |
417 | goto fail1; | ||
418 | } | ||
419 | for (i = 0; i <= efx->type->txd_ring_mask; ++i) | 424 | for (i = 0; i <= efx->type->txd_ring_mask; ++i) |
420 | tx_queue->buffer[i].continuation = 1; | 425 | tx_queue->buffer[i].continuation = 1; |
421 | 426 | ||
422 | /* Allocate hardware ring */ | 427 | /* Allocate hardware ring */ |
423 | rc = falcon_probe_tx(tx_queue); | 428 | rc = falcon_probe_tx(tx_queue); |
424 | if (rc) | 429 | if (rc) |
425 | goto fail2; | 430 | goto fail; |
426 | 431 | ||
427 | return 0; | 432 | return 0; |
428 | 433 | ||
429 | fail2: | 434 | fail: |
430 | kfree(tx_queue->buffer); | 435 | kfree(tx_queue->buffer); |
431 | tx_queue->buffer = NULL; | 436 | tx_queue->buffer = NULL; |
432 | fail1: | ||
433 | tx_queue->used = 0; | ||
434 | |||
435 | return rc; | 437 | return rc; |
436 | } | 438 | } |
437 | 439 | ||
@@ -494,7 +496,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | |||
494 | 496 | ||
495 | kfree(tx_queue->buffer); | 497 | kfree(tx_queue->buffer); |
496 | tx_queue->buffer = NULL; | 498 | tx_queue->buffer = NULL; |
497 | tx_queue->used = 0; | ||
498 | } | 499 | } |
499 | 500 | ||
500 | 501 | ||