author		Ben Hutchings <bhutchings@solarflare.com>	2008-09-01 07:44:59 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-09-03 09:53:42 -0400
commit		60ac10658c2e234cf7bc27e0930e324c6c6fcf61 (patch)
tree		b16ee9efd385806755c6912492db43997c4f4b98 /drivers
parent		26c086771a8ad0a1a72699674fa712fe6aeacb02 (diff)
sfc: Use separate hardware TX queues to select checksum generation
Checksum generation is an attribute of our hardware TX queues, not TX
descriptors.  We previously used a single queue and turned checksum
generation on or off as requested through ethtool.  However, this can
result in regenerating checksums in raw packets that should not be
modified.  We now create 2 hardware TX queues with checksum generation
on or off.  They are presented to the net core as one queue since it
does not know how to select between them.  The self-test verifies that
a bad checksum is unaltered on the queue with checksum generation off.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
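In short, the driver now picks one of the two hardware queues per packet, based on whether the stack asked for checksum offload. The sketch below restates that selection using the constants introduced in net_driver.h and the test applied in efx_hard_start_xmit() in the diff below; it is an illustration of the idea, not the literal driver code (efx_select_tx_queue_index() is a hypothetical helper):

#include <linux/skbuff.h>

/* Queue indices as defined by this patch in net_driver.h */
#define EFX_TX_QUEUE_OFFLOAD_CSUM	0	/* hardware fills in checksums */
#define EFX_TX_QUEUE_NO_CSUM		1	/* packet transmitted unmodified */
#define EFX_TX_QUEUE_COUNT		2

/* Hypothetical helper showing the per-packet queue choice: anything
 * other than CHECKSUM_PARTIAL must not be modified, so it goes to the
 * queue with checksum generation disabled. */
static inline unsigned int efx_select_tx_queue_index(const struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_PARTIAL) ?
		EFX_TX_QUEUE_OFFLOAD_CSUM : EFX_TX_QUEUE_NO_CSUM;
}

Both hardware queues are presented to the net core as a single device queue, which is why this choice has to be made inside the driver's transmit path rather than by the stack.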
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/sfc/efx.c		| 25
-rw-r--r--	drivers/net/sfc/ethtool.c	| 45
-rw-r--r--	drivers/net/sfc/falcon.c	| 11
-rw-r--r--	drivers/net/sfc/net_driver.h	| 21
-rw-r--r--	drivers/net/sfc/selftest.c	| 11
-rw-r--r--	drivers/net/sfc/selftest.h	|  4
-rw-r--r--	drivers/net/sfc/tx.c		| 23
7 files changed, 48 insertions(+), 92 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 42539802b7ae..2a2300571cbf 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -923,22 +923,13 @@ static void efx_select_used(struct efx_nic *efx)
 	struct efx_rx_queue *rx_queue;
 	int i;
 
-	/* TX queues.  One per port per channel with TX capability
-	 * (more than one per port won't work on Linux, due to out
-	 * of order issues... but will be fine on Solaris)
-	 */
-	tx_queue = &efx->tx_queue[0];
-
-	/* Perform this for each channel with TX capabilities.
-	 * At the moment, we only support a single TX queue
-	 */
-	tx_queue->used = 1;
-	if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
-		tx_queue->channel = &efx->channel[1];
-	else
-		tx_queue->channel = &efx->channel[0];
-	tx_queue->channel->used_flags |= EFX_USED_BY_TX;
-	tx_queue++;
+	efx_for_each_tx_queue(tx_queue, efx) {
+		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
+			tx_queue->channel = &efx->channel[1];
+		else
+			tx_queue->channel = &efx->channel[0];
+		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+	}
 
 	/* RX queues.  Each has a dedicated channel. */
 	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
@@ -1881,7 +1872,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		channel->evqnum = i;
 		channel->work_pending = 0;
 	}
-	for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
+	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
 		tx_queue = &efx->tx_queue[i];
 		tx_queue->efx = efx;
 		tx_queue->queue = i;
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 8a15be6548d2..ccd82f12456c 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -32,8 +32,6 @@ const char *efx_loopback_mode_names[] = {
 	[LOOPBACK_NETWORK]	= "NETWORK",
 };
 
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
-
 struct ethtool_string {
 	char name[ETH_GSTRING_LEN];
 };
@@ -442,45 +440,6 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 	}
 }
 
-static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
-{
-	int rc;
-
-	/* Our TSO requires TX checksumming, so force TX checksumming
-	 * on when TSO is enabled.
-	 */
-	if (enable) {
-		rc = efx_ethtool_set_tx_csum(net_dev, 1);
-		if (rc)
-			return rc;
-	}
-
-	return ethtool_op_set_tso(net_dev, enable);
-}
-
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	int rc;
-
-	rc = ethtool_op_set_tx_csum(net_dev, enable);
-	if (rc)
-		return rc;
-
-	efx_flush_queues(efx);
-
-	/* Our TSO requires TX checksumming, so disable TSO when
-	 * checksumming is disabled
-	 */
-	if (!enable) {
-		rc = efx_ethtool_set_tso(net_dev, 0);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
 static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
@@ -701,11 +660,11 @@ struct ethtool_ops efx_ethtool_ops = {
 	.get_rx_csum		= efx_ethtool_get_rx_csum,
 	.set_rx_csum		= efx_ethtool_set_rx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= efx_ethtool_set_tx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= efx_ethtool_set_tso,
+	.set_tso		= ethtool_op_set_tso,
 	.get_flags		= ethtool_op_get_flags,
 	.set_flags		= ethtool_op_set_flags,
 	.self_test_count	= efx_ethtool_self_test_count,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c7aa2f61cb1c..b73f1ea8bd58 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -474,9 +474,9 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      TX_NON_IP_DROP_DIS_B0, 1);
 
 	if (falcon_rev(efx) >= FALCON_REV_B0) {
-		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
+		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
+		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
 	}
 
 	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -485,10 +485,11 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
 
-		BUG_ON(tx_queue->queue >= 128); /* HW limit */
+		/* Only 128 bits in this register */
+		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
 
 		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
-		if (efx->net_dev->features & NETIF_F_IP_CSUM)
+		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
 			clear_bit_le(tx_queue->queue, (void *)&reg);
 		else
 			set_bit_le(tx_queue->queue, (void *)&reg);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 628f25e32918..f539e2e0da1b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -88,9 +88,12 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
 **************************************************************************/
 
 #define EFX_MAX_CHANNELS 32
-#define EFX_MAX_TX_QUEUES 1
 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
 
+#define EFX_TX_QUEUE_OFFLOAD_CSUM	0
+#define EFX_TX_QUEUE_NO_CSUM		1
+#define EFX_TX_QUEUE_COUNT		2
+
 /**
  * struct efx_special_buffer - An Efx special buffer
  * @addr: CPU base address of the buffer
@@ -156,7 +159,6 @@ struct efx_tx_buffer {
  *
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
- * @used: Queue is used by net driver
  * @channel: The associated channel
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
@@ -188,7 +190,6 @@ struct efx_tx_queue {
 	/* Members which don't change on the fast path */
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	int queue;
-	int used;
 	struct efx_channel *channel;
 	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
@@ -699,7 +700,7 @@ struct efx_nic {
 	enum nic_state state;
 	enum reset_type reset_pending;
 
-	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
+	struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
 	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
 	struct efx_channel channel[EFX_MAX_CHANNELS];
 
@@ -840,19 +841,15 @@ struct efx_nic_type {
 /* Iterate over all used TX queues */
 #define efx_for_each_tx_queue(_tx_queue, _efx)				\
 	for (_tx_queue = &_efx->tx_queue[0];				\
-	     _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES];		\
-	     _tx_queue++)						\
-		if (!_tx_queue->used)					\
-			continue;					\
-		else
+	     _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT];		\
+	     _tx_queue++)
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
 	for (_tx_queue = &_channel->efx->tx_queue[0];			\
-	     _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES];	\
+	     _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT];	\
 	     _tx_queue++)						\
-		if ((!_tx_queue->used) ||				\
-		    (_tx_queue->channel != _channel))			\
+		if (_tx_queue->channel != _channel)			\
 			continue;					\
 		else
 
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3b2de9fe7f27..0a4778629178 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -63,6 +63,10 @@ struct efx_selftest_state {
 	int flush;
 	int packet_count;
 	struct sk_buff **skbs;
+
+	/* Checksums are being offloaded */
+	int offload_csum;
+
 	atomic_t rx_good;
 	atomic_t rx_bad;
 	struct efx_loopback_payload payload;
@@ -292,8 +296,9 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
 
 	received = (struct efx_loopback_payload *) buf_ptr;
 	received->ip.saddr = payload->ip.saddr;
-	received->ip.check = payload->ip.check;
-
+	if (state->offload_csum)
+		received->ip.check = payload->ip.check;
+
 	/* Check that header exists */
 	if (pkt_len < sizeof(received->header)) {
 		EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
@@ -634,6 +639,8 @@ static int efx_test_loopbacks(struct efx_nic *efx,
 
 	/* Test every TX queue */
 	efx_for_each_tx_queue(tx_queue, efx) {
+		state->offload_csum = (tx_queue->queue ==
+				       EFX_TX_QUEUE_OFFLOAD_CSUM);
 		rc |= efx_test_loopback(tx_queue,
 					&tests->loopback[mode]);
 		if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6999c2b622d..cd59f00e2821 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_MAX_TX_QUEUES]; 21 int tx_sent[EFX_TX_QUEUE_COUNT];
22 int tx_done[EFX_MAX_TX_QUEUES]; 22 int tx_done[EFX_TX_QUEUE_COUNT];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2a09101f67eb..e5e0bab313eb 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -368,7 +368,14 @@ inline int efx_xmit(struct efx_nic *efx,
 int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	return efx_xmit(efx, &efx->tx_queue[0], skb);
+	struct efx_tx_queue *tx_queue;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
+	else
+		tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
+
+	return efx_xmit(efx, tx_queue, skb);
 }
 
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -412,26 +419,21 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
 	/* Allocate software ring */
 	txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
 	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
-	if (!tx_queue->buffer) {
-		rc = -ENOMEM;
-		goto fail1;
-	}
+	if (!tx_queue->buffer)
+		return -ENOMEM;
 	for (i = 0; i <= efx->type->txd_ring_mask; ++i)
 		tx_queue->buffer[i].continuation = 1;
 
 	/* Allocate hardware ring */
 	rc = falcon_probe_tx(tx_queue);
 	if (rc)
-		goto fail2;
+		goto fail;
 
 	return 0;
 
- fail2:
+ fail:
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
- fail1:
-	tx_queue->used = 0;
-
 	return rc;
 }
 
@@ -494,7 +496,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 
 	kfree(tx_queue->buffer);
 	tx_queue->buffer = NULL;
-	tx_queue->used = 0;
 }
 
 