Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c      |  6
-rw-r--r--  drivers/net/ethernet/sfc/efx.h      | 14
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c  | 16
-rw-r--r--  drivers/net/ethernet/sfc/tx.c       | 19
4 files changed, 46 insertions, 9 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
 		goto fail2;
 	}
 
+	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+		rc = -EINVAL;
+		goto fail3;
+	}
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
 	rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
 	net_dev->irq = efx->pci_dev->irq;
 	net_dev->netdev_ops = &efx_netdev_ops;
 	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
 	rtnl_lock();
 
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_MAX_EVQ_SIZE 16384UL
 #define EFX_MIN_EVQ_SIZE 512UL
 
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
 extern int efx_probe_filters(struct efx_nic *efx);
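
(Not part of the patch.) To make the "space for at least 2 TSO skbs" comment concrete, here is a minimal standalone sketch of the worst-case descriptor arithmetic that efx_tx_max_skb_descs(), added to tx.c below, performs. MAX_SKB_FRAGS, GSO_MAX_SIZE and EFX_PAGE_SIZE are assumed typical values (17, 65536 and 4096), not taken from this diff:

/* Illustrative only: mirrors the descriptor-count reasoning, with
 * assumed values for constants defined elsewhere in the kernel.
 */
#include <stdio.h>

#define TSO_MAX_SEGS	100	/* EFX_TSO_MAX_SEGS from this patch */
#define SKB_MAX_FRAGS	17	/* assumed MAX_SKB_FRAGS (4 KiB pages) */
#define GSO_MAX_BYTES	65536	/* assumed GSO_MAX_SIZE */
#define NIC_PAGE_SIZE	4096	/* assumed EFX_PAGE_SIZE */

int main(void)
{
	/* Header + payload descriptor per output segment, plus one for
	 * every input fragment boundary within a segment.
	 */
	unsigned int max_descs = TSO_MAX_SEGS * 2 + SKB_MAX_FRAGS;

	/* One more per segment on NICs that need the alignment
	 * workaround (EFX_WORKAROUND_5391 in the driver).
	 */
	unsigned int max_descs_wa = max_descs + TSO_MAX_SEGS;

	printf("EFX_TXQ_MIN_ENT = 2 * max_descs = %u (with workaround: %u)\n",
	       2 * max_descs, 2 * max_descs_wa);

	/* If PAGE_SIZE exceeded EFX_PAGE_SIZE, input fragments could
	 * also straddle NIC pages: up to
	 * max(MAX_SKB_FRAGS, GSO_MAX_SIZE / EFX_PAGE_SIZE) extra
	 * descriptors per skb.
	 */
	unsigned int extra = (GSO_MAX_BYTES + NIC_PAGE_SIZE - 1) / NIC_PAGE_SIZE;
	if (extra < SKB_MAX_FRAGS)
		extra = SKB_MAX_FRAGS;
	printf("extra descriptors if PAGE_SIZE > EFX_PAGE_SIZE: %u\n", extra);

	return 0;
}

Under those assumptions EFX_TXQ_MIN_ENT(efx) works out to 434 entries, or 634 when the Falcon/A1 alignment workaround applies, which is what the new WARN_ON in efx_probe_all() checks the default ring size against.
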
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..8cba2df82b18 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
 				     struct ethtool_ringparam *ring)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 txq_entries;
 
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
 	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
 		return -EINVAL;
 
-	if (ring->rx_pending < EFX_MIN_RING_SIZE ||
-	    ring->tx_pending < EFX_MIN_RING_SIZE) {
+	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
 		netif_err(efx, drv, efx->net_dev,
-			  "TX and RX queues cannot be smaller than %ld\n",
-			  EFX_MIN_RING_SIZE);
+			  "RX queues cannot be smaller than %u\n",
+			  EFX_RXQ_MIN_ENT);
 		return -EINVAL;
 	}
 
-	return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+	if (txq_entries != ring->tx_pending)
+		netif_warn(efx, drv, efx->net_dev,
+			   "increasing TX queue size to minimum of %u\n",
+			   txq_entries);
+
+	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
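
(Not part of the patch.) With this change a too-small TX ring request via ethtool -G is raised to the minimum rather than rejected, while an undersized RX request still fails. A minimal standalone sketch of that clamp-and-warn behaviour, using the 434-entry figure from the sketch above as an assumed EFX_TXQ_MIN_ENT(efx):

#include <stdio.h>

#define ASSUMED_TXQ_MIN_ENT 434U	/* hypothetical EFX_TXQ_MIN_ENT(efx) */

/* Mirrors the new set_ringparam logic: raise the requested TX ring
 * size to the minimum and report when that happens.
 */
static unsigned int clamp_tx_ring(unsigned int requested)
{
	unsigned int txq_entries =
		requested < ASSUMED_TXQ_MIN_ENT ? ASSUMED_TXQ_MIN_ENT : requested;

	if (txq_entries != requested)
		printf("increasing TX queue size to minimum of %u\n",
		       txq_entries);
	return txq_entries;
}

int main(void)
{
	clamp_tx_ring(256);	/* e.g. "ethtool -G <iface> tx 256": raised to 434 */
	clamp_tx_ring(1024);	/* already large enough: left unchanged */
	return 0;
}
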
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 	return len;
 }
 
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+	/* Header and payload descriptor for each output segment, plus
+	 * one for every input fragment boundary within a segment
+	 */
+	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+	/* Possibly one more per segment for the alignment workaround */
+	if (EFX_WORKAROUND_5391(efx))
+		max_descs += EFX_TSO_MAX_SEGS;
+
+	/* Possibly more for PCIe page boundaries within input fragments */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	return max_descs;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *