Diffstat (limited to 'drivers/net/sfc/nic.c')
-rw-r--r--	drivers/net/sfc/nic.c	73
1 file changed, 39 insertions(+), 34 deletions(-)
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab68..e8396614daf3 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
 #define RX_DC_ENTRIES 64
 #define RX_DC_ENTRIES_ORDER 3
 
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
 /* If EFX_MAX_INT_ERRORS internal errors occur within
  * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
  * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
-				    !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 		set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 					++rx_pending;
 				}
 			}
-			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				if (tx_queue->flushed != FLUSH_DONE)
+			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+				if (tx_queue->initialised &&
+				    tx_queue->flushed != FLUSH_DONE)
 					++tx_pending;
 			}
 		}
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */