aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h188
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c63
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c70
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
4 files changed, 323 insertions, 2 deletions
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 826a11714d5e..c641f41a7aba 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -31,7 +31,118 @@
31 31
32#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ 32#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
33 33
34#define MAX_QUEUES FM10K_MAX_QUEUES_PF
35
36#define FM10K_MIN_RXD 128
37#define FM10K_MAX_RXD 4096
38#define FM10K_DEFAULT_RXD 256
39
40#define FM10K_MIN_TXD 128
41#define FM10K_MAX_TXD 4096
42#define FM10K_DEFAULT_TXD 256
43#define FM10K_DEFAULT_TX_WORK 256
44
45#define FM10K_RXBUFFER_256 256
46#define FM10K_RXBUFFER_16384 16384
47#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256
48#if PAGE_SIZE <= FM10K_RXBUFFER_16384
49#define FM10K_RX_BUFSZ (PAGE_SIZE / 2)
50#else
51#define FM10K_RX_BUFSZ FM10K_RXBUFFER_16384
52#endif
53
54/* How many Rx Buffers do we bundle into one write to the hardware ? */
55#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */
56
57enum fm10k_ring_state_t {
58 __FM10K_TX_DETECT_HANG,
59 __FM10K_HANG_CHECK_ARMED,
60};
61
62#define check_for_tx_hang(ring) \
63 test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
64#define set_check_for_tx_hang(ring) \
65 set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
66#define clear_check_for_tx_hang(ring) \
67 clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
68
69struct fm10k_tx_buffer {
70 struct fm10k_tx_desc *next_to_watch;
71 struct sk_buff *skb;
72 unsigned int bytecount;
73 u16 gso_segs;
74 u16 tx_flags;
75 DEFINE_DMA_UNMAP_ADDR(dma);
76 DEFINE_DMA_UNMAP_LEN(len);
77};
78
79struct fm10k_rx_buffer {
80 dma_addr_t dma;
81 struct page *page;
82 u32 page_offset;
83};
84
85struct fm10k_queue_stats {
86 u64 packets;
87 u64 bytes;
88};
89
90struct fm10k_tx_queue_stats {
91 u64 restart_queue;
92 u64 csum_err;
93 u64 tx_busy;
94 u64 tx_done_old;
95};
96
97struct fm10k_rx_queue_stats {
98 u64 alloc_failed;
99 u64 csum_err;
100 u64 errors;
101};
102
103struct fm10k_ring {
104 struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */
105 struct net_device *netdev; /* netdev ring belongs to */
106 struct device *dev; /* device for DMA mapping */
107 void *desc; /* descriptor ring memory */
108 union {
109 struct fm10k_tx_buffer *tx_buffer;
110 struct fm10k_rx_buffer *rx_buffer;
111 };
112 u32 __iomem *tail;
113 unsigned long state;
114 dma_addr_t dma; /* phys. address of descriptor ring */
115 unsigned int size; /* length in bytes */
116
117 u8 queue_index; /* needed for queue management */
118 u8 reg_idx; /* holds the special value that gets
119 * the hardware register offset
120 * associated with this ring, which is
121 * different for DCB and RSS modes
122 */
123 u8 qos_pc; /* priority class of queue */
124 u16 vid; /* default vlan ID of queue */
125 u16 count; /* amount of descriptors */
126
127 u16 next_to_alloc;
128 u16 next_to_use;
129 u16 next_to_clean;
130
131 struct fm10k_queue_stats stats;
132 struct u64_stats_sync syncp;
133 union {
134 /* Tx */
135 struct fm10k_tx_queue_stats tx_stats;
136 /* Rx */
137 struct {
138 struct fm10k_rx_queue_stats rx_stats;
139 struct sk_buff *skb;
140 };
141 };
142} ____cacheline_internodealigned_in_smp;
143
34struct fm10k_ring_container { 144struct fm10k_ring_container {
145 struct fm10k_ring *ring; /* pointer to linked list of rings */
35 unsigned int total_bytes; /* total bytes processed this int */ 146 unsigned int total_bytes; /* total bytes processed this int */
36 unsigned int total_packets; /* total packets processed this int */ 147 unsigned int total_packets; /* total packets processed this int */
37 u16 work_limit; /* total work allowed per interrupt */ 148 u16 work_limit; /* total work allowed per interrupt */
@@ -46,6 +157,15 @@ struct fm10k_ring_container {
46 157
47#define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) 158#define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR)
48 159
160static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
161{
162 return &ring->netdev->_tx[ring->queue_index];
163}
164
165/* iterator for handling rings in ring container */
166#define fm10k_for_each_ring(pos, head) \
167 for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)
168
49#define MAX_Q_VECTORS 256 169#define MAX_Q_VECTORS 256
50#define MIN_Q_VECTORS 1 170#define MIN_Q_VECTORS 1
51enum fm10k_non_q_vectors { 171enum fm10k_non_q_vectors {
@@ -68,6 +188,9 @@ struct fm10k_q_vector {
68 char name[IFNAMSIZ + 9]; 188 char name[IFNAMSIZ + 9];
69 189
70 struct rcu_head rcu; /* to avoid race with update stats on free */ 190 struct rcu_head rcu; /* to avoid race with update stats on free */
191
192 /* for dynamic allocation of rings associated with this q_vector */
193 struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
71}; 194};
72 195
73enum fm10k_ring_f_enum { 196enum fm10k_ring_f_enum {
@@ -113,9 +236,15 @@ struct fm10k_intfc {
113 int num_rx_queues; 236 int num_rx_queues;
114 u16 rx_itr; 237 u16 rx_itr;
115 238
239 /* TX */
240 struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp;
241
116 u64 rx_overrun_pf; 242 u64 rx_overrun_pf;
117 u64 rx_overrun_vf; 243 u64 rx_overrun_vf;
118 244
245 /* RX */
246 struct fm10k_ring *rx_ring[MAX_QUEUES];
247
119 /* Queueing vectors */ 248 /* Queueing vectors */
120 struct fm10k_q_vector *q_vector[MAX_Q_VECTORS]; 249 struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
121 struct msix_entry *msix_entries; 250 struct msix_entry *msix_entries;
@@ -176,6 +305,65 @@ static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
176 return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state); 305 return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
177} 306}
178 307
308/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
309static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc,
310 const u32 stat_err_bits)
311{
312 return rx_desc->d.staterr & cpu_to_le32(stat_err_bits);
313}
314
315/* fm10k_desc_unused - calculate if we have unused descriptors */
316static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
317{
318 s16 unused = ring->next_to_clean - ring->next_to_use - 1;
319
320 return likely(unused < 0) ? unused + ring->count : unused;
321}
322
323#define FM10K_TX_DESC(R, i) \
324 (&(((struct fm10k_tx_desc *)((R)->desc))[i]))
325#define FM10K_RX_DESC(R, i) \
326 (&(((union fm10k_rx_desc *)((R)->desc))[i]))
327
328#define FM10K_MAX_TXD_PWR 14
329#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR)
330
331/* Tx Descriptors needed, worst case */
332#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
333#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
334
335enum fm10k_tx_flags {
336 /* Tx offload flags */
337 FM10K_TX_FLAGS_CSUM = 0x01,
338};
339
340/* This structure is stored as little endian values as that is the native
341 * format of the Rx descriptor. The ordering of these fields is reversed
342 * from the actual ftag header to allow for a single bswap to take care
343 * of placing all of the values in network order
344 */
345union fm10k_ftag_info {
346 __le64 ftag;
347 struct {
348 /* dglort and sglort combined into a single 32bit desc read */
349 __le32 glort;
350 /* upper 16 bits of vlan are reserved 0 for swpri_type_user */
351 __le32 vlan;
352 } d;
353 struct {
354 __le16 dglort;
355 __le16 sglort;
356 __le16 vlan;
357 __le16 swpri_type_user;
358 } w;
359};
360
361struct fm10k_cb {
362 union fm10k_ftag_info fi;
363};
364
365#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
366
179/* main */ 367/* main */
180extern char fm10k_driver_name[]; 368extern char fm10k_driver_name[];
181extern const char fm10k_driver_version[]; 369extern const char fm10k_driver_version[];
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index b0a2ba1a623d..bf84c263df0e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -183,10 +183,12 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
183 unsigned int rxr_count, unsigned int rxr_idx) 183 unsigned int rxr_count, unsigned int rxr_idx)
184{ 184{
185 struct fm10k_q_vector *q_vector; 185 struct fm10k_q_vector *q_vector;
186 struct fm10k_ring *ring;
186 int ring_count, size; 187 int ring_count, size;
187 188
188 ring_count = txr_count + rxr_count; 189 ring_count = txr_count + rxr_count;
189 size = sizeof(struct fm10k_q_vector); 190 size = sizeof(struct fm10k_q_vector) +
191 (sizeof(struct fm10k_ring) * ring_count);
190 192
191 /* allocate q_vector and rings */ 193 /* allocate q_vector and rings */
192 q_vector = kzalloc(size, GFP_KERNEL); 194 q_vector = kzalloc(size, GFP_KERNEL);
@@ -202,14 +204,66 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
202 q_vector->interface = interface; 204 q_vector->interface = interface;
203 q_vector->v_idx = v_idx; 205 q_vector->v_idx = v_idx;
204 206
207 /* initialize pointer to rings */
208 ring = q_vector->ring;
209
205 /* save Tx ring container info */ 210 /* save Tx ring container info */
211 q_vector->tx.ring = ring;
212 q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
206 q_vector->tx.itr = interface->tx_itr; 213 q_vector->tx.itr = interface->tx_itr;
207 q_vector->tx.count = txr_count; 214 q_vector->tx.count = txr_count;
208 215
216 while (txr_count) {
217 /* assign generic ring traits */
218 ring->dev = &interface->pdev->dev;
219 ring->netdev = interface->netdev;
220
221 /* configure backlink on ring */
222 ring->q_vector = q_vector;
223
224 /* apply Tx specific ring traits */
225 ring->count = interface->tx_ring_count;
226 ring->queue_index = txr_idx;
227
228 /* assign ring to interface */
229 interface->tx_ring[txr_idx] = ring;
230
231 /* update count and index */
232 txr_count--;
233 txr_idx += v_count;
234
235 /* push pointer to next ring */
236 ring++;
237 }
238
209 /* save Rx ring container info */ 239 /* save Rx ring container info */
240 q_vector->rx.ring = ring;
210 q_vector->rx.itr = interface->rx_itr; 241 q_vector->rx.itr = interface->rx_itr;
211 q_vector->rx.count = rxr_count; 242 q_vector->rx.count = rxr_count;
212 243
244 while (rxr_count) {
245 /* assign generic ring traits */
246 ring->dev = &interface->pdev->dev;
247 ring->netdev = interface->netdev;
248
249 /* configure backlink on ring */
250 ring->q_vector = q_vector;
251
252 /* apply Rx specific ring traits */
253 ring->count = interface->rx_ring_count;
254 ring->queue_index = rxr_idx;
255
256 /* assign ring to interface */
257 interface->rx_ring[rxr_idx] = ring;
258
259 /* update count and index */
260 rxr_count--;
261 rxr_idx += v_count;
262
263 /* push pointer to next ring */
264 ring++;
265 }
266
213 return 0; 267 return 0;
214} 268}
215 269
@@ -225,6 +279,13 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
225static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) 279static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
226{ 280{
227 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; 281 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
282 struct fm10k_ring *ring;
283
284 fm10k_for_each_ring(ring, q_vector->tx)
285 interface->tx_ring[ring->queue_index] = NULL;
286
287 fm10k_for_each_ring(ring, q_vector->rx)
288 interface->rx_ring[ring->queue_index] = NULL;
228 289
229 interface->q_vector[v_idx] = NULL; 290 interface->q_vector[v_idx] = NULL;
230 netif_napi_del(&q_vector->napi); 291 netif_napi_del(&q_vector->napi);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 487efcbb309e..b987bb6a5e1c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -67,10 +67,19 @@ int fm10k_open(struct net_device *netdev)
67 /* setup GLORT assignment for this port */ 67 /* setup GLORT assignment for this port */
68 fm10k_request_glort_range(interface); 68 fm10k_request_glort_range(interface);
69 69
70 /* Notify the stack of the actual queue counts */
71
72 err = netif_set_real_num_rx_queues(netdev,
73 interface->num_rx_queues);
74 if (err)
75 goto err_set_queues;
76
70 fm10k_up(interface); 77 fm10k_up(interface);
71 78
72 return 0; 79 return 0;
73 80
81err_set_queues:
82 fm10k_qv_free_irq(interface);
74err_req_irq: 83err_req_irq:
75 return err; 84 return err;
76} 85}
@@ -474,6 +483,64 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
474 __dev_mc_unsync(netdev, NULL); 483 __dev_mc_unsync(netdev, NULL);
475} 484}
476 485
486/**
487 * fm10k_get_stats64 - Get System Network Statistics
488 * @netdev: network interface device structure
489 * @stats: storage space for 64bit statistics
490 *
491 * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
492 * function replaces fm10k_get_stats for kernels which support it.
493 */
494static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
495 struct rtnl_link_stats64 *stats)
496{
497 struct fm10k_intfc *interface = netdev_priv(netdev);
498 struct fm10k_ring *ring;
499 unsigned int start, i;
500 u64 bytes, packets;
501
502 rcu_read_lock();
503
504 for (i = 0; i < interface->num_rx_queues; i++) {
505 ring = ACCESS_ONCE(interface->rx_ring[i]);
506
507 if (!ring)
508 continue;
509
510 do {
511 start = u64_stats_fetch_begin_irq(&ring->syncp);
512 packets = ring->stats.packets;
513 bytes = ring->stats.bytes;
514 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
515
516 stats->rx_packets += packets;
517 stats->rx_bytes += bytes;
518 }
519
520 for (i = 0; i < interface->num_tx_queues; i++) {
 521 ring = ACCESS_ONCE(interface->tx_ring[i]);
522
523 if (!ring)
524 continue;
525
526 do {
527 start = u64_stats_fetch_begin_irq(&ring->syncp);
528 packets = ring->stats.packets;
529 bytes = ring->stats.bytes;
530 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
531
532 stats->tx_packets += packets;
533 stats->tx_bytes += bytes;
534 }
535
536 rcu_read_unlock();
537
538 /* following stats updated by fm10k_service_task() */
539 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
540
541 return stats;
542}
543
477static const struct net_device_ops fm10k_netdev_ops = { 544static const struct net_device_ops fm10k_netdev_ops = {
478 .ndo_open = fm10k_open, 545 .ndo_open = fm10k_open,
479 .ndo_stop = fm10k_close, 546 .ndo_stop = fm10k_close,
@@ -484,6 +551,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
484 .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, 551 .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid,
485 .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, 552 .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
486 .ndo_set_rx_mode = fm10k_set_rx_mode, 553 .ndo_set_rx_mode = fm10k_set_rx_mode,
554 .ndo_get_stats64 = fm10k_get_stats64,
487}; 555};
488 556
489#define DEFAULT_DEBUG_LEVEL_SHIFT 3 557#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -493,7 +561,7 @@ struct net_device *fm10k_alloc_netdev(void)
493 struct fm10k_intfc *interface; 561 struct fm10k_intfc *interface;
494 struct net_device *dev; 562 struct net_device *dev;
495 563
496 dev = alloc_etherdev(sizeof(struct fm10k_intfc)); 564 dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
497 if (!dev) 565 if (!dev)
498 return NULL; 566 return NULL;
499 567
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 2257ab1f4607..5a28298a19f8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -707,6 +707,10 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
707 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; 707 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
708 } 708 }
709 709
710 /* set default ring sizes */
711 interface->tx_ring_count = FM10K_DEFAULT_TXD;
712 interface->rx_ring_count = FM10K_DEFAULT_RXD;
713
710 /* set default interrupt moderation */ 714 /* set default interrupt moderation */
711 interface->tx_itr = FM10K_ITR_10K; 715 interface->tx_itr = FM10K_ITR_10K;
712 interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; 716 interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;