diff options
author | Sasha Neftin <sasha.neftin@intel.com> | 2018-10-11 03:17:19 -0400 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2018-10-17 16:20:43 -0400 |
commit | 13b5b7fd6a4a96dffe604f25e7b64cfbd9520924 (patch) | |
tree | 43a5df47a21d4ff121a50369c033c2ca10f6f583 /drivers/net/ethernet/intel/igc/igc_main.c | |
parent | 3df25e4c1e66a69097bde99990fb095b26125c82 (diff) |
igc: Add support for Tx/Rx rings
This change adds the defines and structures necessary to support both Tx
and Rx descriptor rings.
Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/igc/igc_main.c | 827 |
1 files changed, 827 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 0fd66620cfa1..373ccea86fb0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c | |||
@@ -37,10 +37,12 @@ static const struct pci_device_id igc_pci_tbl[] = { | |||
37 | MODULE_DEVICE_TABLE(pci, igc_pci_tbl); | 37 | MODULE_DEVICE_TABLE(pci, igc_pci_tbl); |
38 | 38 | ||
39 | /* forward declaration */ | 39 | /* forward declaration */ |
40 | static void igc_clean_tx_ring(struct igc_ring *tx_ring); | ||
40 | static int igc_sw_init(struct igc_adapter *); | 41 | static int igc_sw_init(struct igc_adapter *); |
41 | static void igc_configure(struct igc_adapter *adapter); | 42 | static void igc_configure(struct igc_adapter *adapter); |
42 | static void igc_power_down_link(struct igc_adapter *adapter); | 43 | static void igc_power_down_link(struct igc_adapter *adapter); |
43 | static void igc_set_default_mac_filter(struct igc_adapter *adapter); | 44 | static void igc_set_default_mac_filter(struct igc_adapter *adapter); |
45 | static void igc_set_rx_mode(struct net_device *netdev); | ||
44 | static void igc_write_itr(struct igc_q_vector *q_vector); | 46 | static void igc_write_itr(struct igc_q_vector *q_vector); |
45 | static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector); | 47 | static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector); |
46 | static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx); | 48 | static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx); |
@@ -119,6 +121,527 @@ static void igc_get_hw_control(struct igc_adapter *adapter) | |||
119 | } | 121 | } |
120 | 122 | ||
121 | /** | 123 | /** |
124 | * igc_free_tx_resources - Free Tx Resources per Queue | ||
125 | * @tx_ring: Tx descriptor ring for a specific queue | ||
126 | * | ||
127 | * Free all transmit software resources | ||
128 | */ | ||
129 | static void igc_free_tx_resources(struct igc_ring *tx_ring) | ||
130 | { | ||
131 | igc_clean_tx_ring(tx_ring); | ||
132 | |||
133 | vfree(tx_ring->tx_buffer_info); | ||
134 | tx_ring->tx_buffer_info = NULL; | ||
135 | |||
136 | /* if not set, then don't free */ | ||
137 | if (!tx_ring->desc) | ||
138 | return; | ||
139 | |||
140 | dma_free_coherent(tx_ring->dev, tx_ring->size, | ||
141 | tx_ring->desc, tx_ring->dma); | ||
142 | |||
143 | tx_ring->desc = NULL; | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * igc_free_all_tx_resources - Free Tx Resources for All Queues | ||
148 | * @adapter: board private structure | ||
149 | * | ||
150 | * Free all transmit software resources | ||
151 | */ | ||
152 | static void igc_free_all_tx_resources(struct igc_adapter *adapter) | ||
153 | { | ||
154 | int i; | ||
155 | |||
156 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
157 | igc_free_tx_resources(adapter->tx_ring[i]); | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * igc_clean_tx_ring - Free Tx Buffers | ||
162 | * @tx_ring: ring to be cleaned | ||
163 | */ | ||
164 | static void igc_clean_tx_ring(struct igc_ring *tx_ring) | ||
165 | { | ||
166 | u16 i = tx_ring->next_to_clean; | ||
167 | struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; | ||
168 | |||
169 | while (i != tx_ring->next_to_use) { | ||
170 | union igc_adv_tx_desc *eop_desc, *tx_desc; | ||
171 | |||
172 | /* Free all the Tx ring sk_buffs */ | ||
173 | dev_kfree_skb_any(tx_buffer->skb); | ||
174 | |||
175 | /* unmap skb header data */ | ||
176 | dma_unmap_single(tx_ring->dev, | ||
177 | dma_unmap_addr(tx_buffer, dma), | ||
178 | dma_unmap_len(tx_buffer, len), | ||
179 | DMA_TO_DEVICE); | ||
180 | |||
181 | /* check for eop_desc to determine the end of the packet */ | ||
182 | eop_desc = tx_buffer->next_to_watch; | ||
183 | tx_desc = IGC_TX_DESC(tx_ring, i); | ||
184 | |||
185 | /* unmap remaining buffers */ | ||
186 | while (tx_desc != eop_desc) { | ||
187 | tx_buffer++; | ||
188 | tx_desc++; | ||
189 | i++; | ||
190 | if (unlikely(i == tx_ring->count)) { | ||
191 | i = 0; | ||
192 | tx_buffer = tx_ring->tx_buffer_info; | ||
193 | tx_desc = IGC_TX_DESC(tx_ring, 0); | ||
194 | } | ||
195 | |||
196 | /* unmap any remaining paged data */ | ||
197 | if (dma_unmap_len(tx_buffer, len)) | ||
198 | dma_unmap_page(tx_ring->dev, | ||
199 | dma_unmap_addr(tx_buffer, dma), | ||
200 | dma_unmap_len(tx_buffer, len), | ||
201 | DMA_TO_DEVICE); | ||
202 | } | ||
203 | |||
204 | /* move us one more past the eop_desc for start of next pkt */ | ||
205 | tx_buffer++; | ||
206 | i++; | ||
207 | if (unlikely(i == tx_ring->count)) { | ||
208 | i = 0; | ||
209 | tx_buffer = tx_ring->tx_buffer_info; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | /* reset BQL for queue */ | ||
214 | netdev_tx_reset_queue(txring_txq(tx_ring)); | ||
215 | |||
216 | /* reset next_to_use and next_to_clean */ | ||
217 | tx_ring->next_to_use = 0; | ||
218 | tx_ring->next_to_clean = 0; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * igc_setup_tx_resources - allocate Tx resources (Descriptors) | ||
223 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | ||
224 | * | ||
225 | * Return 0 on success, negative on failure | ||
226 | */ | ||
227 | static int igc_setup_tx_resources(struct igc_ring *tx_ring) | ||
228 | { | ||
229 | struct device *dev = tx_ring->dev; | ||
230 | int size = 0; | ||
231 | |||
232 | size = sizeof(struct igc_tx_buffer) * tx_ring->count; | ||
233 | tx_ring->tx_buffer_info = vzalloc(size); | ||
234 | if (!tx_ring->tx_buffer_info) | ||
235 | goto err; | ||
236 | |||
237 | /* round up to nearest 4K */ | ||
238 | tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); | ||
239 | tx_ring->size = ALIGN(tx_ring->size, 4096); | ||
240 | |||
241 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, | ||
242 | &tx_ring->dma, GFP_KERNEL); | ||
243 | |||
244 | if (!tx_ring->desc) | ||
245 | goto err; | ||
246 | |||
247 | tx_ring->next_to_use = 0; | ||
248 | tx_ring->next_to_clean = 0; | ||
249 | |||
250 | return 0; | ||
251 | |||
252 | err: | ||
253 | vfree(tx_ring->tx_buffer_info); | ||
254 | dev_err(dev, | ||
255 | "Unable to allocate memory for the transmit descriptor ring\n"); | ||
256 | return -ENOMEM; | ||
257 | } | ||
258 | |||
259 | /** | ||
260 | * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues | ||
261 | * @adapter: board private structure | ||
262 | * | ||
263 | * Return 0 on success, negative on failure | ||
264 | */ | ||
265 | static int igc_setup_all_tx_resources(struct igc_adapter *adapter) | ||
266 | { | ||
267 | struct pci_dev *pdev = adapter->pdev; | ||
268 | int i, err = 0; | ||
269 | |||
270 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
271 | err = igc_setup_tx_resources(adapter->tx_ring[i]); | ||
272 | if (err) { | ||
273 | dev_err(&pdev->dev, | ||
274 | "Allocation for Tx Queue %u failed\n", i); | ||
275 | for (i--; i >= 0; i--) | ||
276 | igc_free_tx_resources(adapter->tx_ring[i]); | ||
277 | break; | ||
278 | } | ||
279 | } | ||
280 | |||
281 | return err; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * igc_clean_rx_ring - Free Rx Buffers per Queue | ||
286 | * @rx_ring: ring to free buffers from | ||
287 | */ | ||
288 | static void igc_clean_rx_ring(struct igc_ring *rx_ring) | ||
289 | { | ||
290 | u16 i = rx_ring->next_to_clean; | ||
291 | |||
292 | if (rx_ring->skb) | ||
293 | dev_kfree_skb(rx_ring->skb); | ||
294 | rx_ring->skb = NULL; | ||
295 | |||
296 | /* Free all the Rx ring sk_buffs */ | ||
297 | while (i != rx_ring->next_to_alloc) { | ||
298 | struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; | ||
299 | |||
300 | /* Invalidate cache lines that may have been written to by | ||
301 | * device so that we avoid corrupting memory. | ||
302 | */ | ||
303 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
304 | buffer_info->dma, | ||
305 | buffer_info->page_offset, | ||
306 | igc_rx_bufsz(rx_ring), | ||
307 | DMA_FROM_DEVICE); | ||
308 | |||
309 | /* free resources associated with mapping */ | ||
310 | dma_unmap_page_attrs(rx_ring->dev, | ||
311 | buffer_info->dma, | ||
312 | igc_rx_pg_size(rx_ring), | ||
313 | DMA_FROM_DEVICE, | ||
314 | IGC_RX_DMA_ATTR); | ||
315 | __page_frag_cache_drain(buffer_info->page, | ||
316 | buffer_info->pagecnt_bias); | ||
317 | |||
318 | i++; | ||
319 | if (i == rx_ring->count) | ||
320 | i = 0; | ||
321 | } | ||
322 | |||
323 | rx_ring->next_to_alloc = 0; | ||
324 | rx_ring->next_to_clean = 0; | ||
325 | rx_ring->next_to_use = 0; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * igc_free_rx_resources - Free Rx Resources | ||
330 | * @rx_ring: ring to clean the resources from | ||
331 | * | ||
332 | * Free all receive software resources | ||
333 | */ | ||
334 | static void igc_free_rx_resources(struct igc_ring *rx_ring) | ||
335 | { | ||
336 | igc_clean_rx_ring(rx_ring); | ||
337 | |||
338 | vfree(rx_ring->rx_buffer_info); | ||
339 | rx_ring->rx_buffer_info = NULL; | ||
340 | |||
341 | /* if not set, then don't free */ | ||
342 | if (!rx_ring->desc) | ||
343 | return; | ||
344 | |||
345 | dma_free_coherent(rx_ring->dev, rx_ring->size, | ||
346 | rx_ring->desc, rx_ring->dma); | ||
347 | |||
348 | rx_ring->desc = NULL; | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * igc_free_all_rx_resources - Free Rx Resources for All Queues | ||
353 | * @adapter: board private structure | ||
354 | * | ||
355 | * Free all receive software resources | ||
356 | */ | ||
357 | static void igc_free_all_rx_resources(struct igc_adapter *adapter) | ||
358 | { | ||
359 | int i; | ||
360 | |||
361 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
362 | igc_free_rx_resources(adapter->rx_ring[i]); | ||
363 | } | ||
364 | |||
365 | /** | ||
366 | * igc_setup_rx_resources - allocate Rx resources (Descriptors) | ||
367 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | ||
368 | * | ||
369 | * Returns 0 on success, negative on failure | ||
370 | */ | ||
371 | static int igc_setup_rx_resources(struct igc_ring *rx_ring) | ||
372 | { | ||
373 | struct device *dev = rx_ring->dev; | ||
374 | int size, desc_len; | ||
375 | |||
376 | size = sizeof(struct igc_rx_buffer) * rx_ring->count; | ||
377 | rx_ring->rx_buffer_info = vzalloc(size); | ||
378 | if (!rx_ring->rx_buffer_info) | ||
379 | goto err; | ||
380 | |||
381 | desc_len = sizeof(union igc_adv_rx_desc); | ||
382 | |||
383 | /* Round up to nearest 4K */ | ||
384 | rx_ring->size = rx_ring->count * desc_len; | ||
385 | rx_ring->size = ALIGN(rx_ring->size, 4096); | ||
386 | |||
387 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, | ||
388 | &rx_ring->dma, GFP_KERNEL); | ||
389 | |||
390 | if (!rx_ring->desc) | ||
391 | goto err; | ||
392 | |||
393 | rx_ring->next_to_alloc = 0; | ||
394 | rx_ring->next_to_clean = 0; | ||
395 | rx_ring->next_to_use = 0; | ||
396 | |||
397 | return 0; | ||
398 | |||
399 | err: | ||
400 | vfree(rx_ring->rx_buffer_info); | ||
401 | rx_ring->rx_buffer_info = NULL; | ||
402 | dev_err(dev, | ||
403 | "Unable to allocate memory for the receive descriptor ring\n"); | ||
404 | return -ENOMEM; | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * igc_setup_all_rx_resources - wrapper to allocate Rx resources | ||
409 | * (Descriptors) for all queues | ||
410 | * @adapter: board private structure | ||
411 | * | ||
412 | * Return 0 on success, negative on failure | ||
413 | */ | ||
414 | static int igc_setup_all_rx_resources(struct igc_adapter *adapter) | ||
415 | { | ||
416 | struct pci_dev *pdev = adapter->pdev; | ||
417 | int i, err = 0; | ||
418 | |||
419 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
420 | err = igc_setup_rx_resources(adapter->rx_ring[i]); | ||
421 | if (err) { | ||
422 | dev_err(&pdev->dev, | ||
423 | "Allocation for Rx Queue %u failed\n", i); | ||
424 | for (i--; i >= 0; i--) | ||
425 | igc_free_rx_resources(adapter->rx_ring[i]); | ||
426 | break; | ||
427 | } | ||
428 | } | ||
429 | |||
430 | return err; | ||
431 | } | ||
432 | |||
433 | /** | ||
434 | * igc_configure_rx_ring - Configure a receive ring after Reset | ||
435 | * @adapter: board private structure | ||
436 | * @ring: receive ring to be configured | ||
437 | * | ||
438 | * Configure the Rx unit of the MAC after a reset. | ||
439 | */ | ||
440 | static void igc_configure_rx_ring(struct igc_adapter *adapter, | ||
441 | struct igc_ring *ring) | ||
442 | { | ||
443 | struct igc_hw *hw = &adapter->hw; | ||
444 | union igc_adv_rx_desc *rx_desc; | ||
445 | int reg_idx = ring->reg_idx; | ||
446 | u32 srrctl = 0, rxdctl = 0; | ||
447 | u64 rdba = ring->dma; | ||
448 | |||
449 | /* disable the queue */ | ||
450 | wr32(IGC_RXDCTL(reg_idx), 0); | ||
451 | |||
452 | /* Set DMA base address registers */ | ||
453 | wr32(IGC_RDBAL(reg_idx), | ||
454 | rdba & 0x00000000ffffffffULL); | ||
455 | wr32(IGC_RDBAH(reg_idx), rdba >> 32); | ||
456 | wr32(IGC_RDLEN(reg_idx), | ||
457 | ring->count * sizeof(union igc_adv_rx_desc)); | ||
458 | |||
459 | /* initialize head and tail */ | ||
460 | ring->tail = adapter->io_addr + IGC_RDT(reg_idx); | ||
461 | wr32(IGC_RDH(reg_idx), 0); | ||
462 | writel(0, ring->tail); | ||
463 | |||
464 | /* reset next-to-use/clean to place SW in sync with hardware */ | ||
465 | ring->next_to_clean = 0; | ||
466 | ring->next_to_use = 0; | ||
467 | |||
468 | /* set descriptor configuration */ | ||
469 | srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; | ||
470 | if (ring_uses_large_buffer(ring)) | ||
471 | srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT; | ||
472 | else | ||
473 | srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT; | ||
474 | srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; | ||
475 | |||
476 | wr32(IGC_SRRCTL(reg_idx), srrctl); | ||
477 | |||
478 | rxdctl |= IGC_RX_PTHRESH; | ||
479 | rxdctl |= IGC_RX_HTHRESH << 8; | ||
480 | rxdctl |= IGC_RX_WTHRESH << 16; | ||
481 | |||
482 | /* initialize rx_buffer_info */ | ||
483 | memset(ring->rx_buffer_info, 0, | ||
484 | sizeof(struct igc_rx_buffer) * ring->count); | ||
485 | |||
486 | /* initialize Rx descriptor 0 */ | ||
487 | rx_desc = IGC_RX_DESC(ring, 0); | ||
488 | rx_desc->wb.upper.length = 0; | ||
489 | |||
490 | /* enable receive descriptor fetching */ | ||
491 | rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; | ||
492 | |||
493 | wr32(IGC_RXDCTL(reg_idx), rxdctl); | ||
494 | } | ||
495 | |||
496 | /** | ||
497 | * igc_configure_rx - Configure receive Unit after Reset | ||
498 | * @adapter: board private structure | ||
499 | * | ||
500 | * Configure the Rx unit of the MAC after a reset. | ||
501 | */ | ||
502 | static void igc_configure_rx(struct igc_adapter *adapter) | ||
503 | { | ||
504 | int i; | ||
505 | |||
506 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | ||
507 | * the Base and Length of the Rx Descriptor Ring | ||
508 | */ | ||
509 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
510 | igc_configure_rx_ring(adapter, adapter->rx_ring[i]); | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * igc_configure_tx_ring - Configure transmit ring after Reset | ||
515 | * @adapter: board private structure | ||
516 | * @ring: tx ring to configure | ||
517 | * | ||
518 | * Configure a transmit ring after a reset. | ||
519 | */ | ||
520 | static void igc_configure_tx_ring(struct igc_adapter *adapter, | ||
521 | struct igc_ring *ring) | ||
522 | { | ||
523 | struct igc_hw *hw = &adapter->hw; | ||
524 | int reg_idx = ring->reg_idx; | ||
525 | u64 tdba = ring->dma; | ||
526 | u32 txdctl = 0; | ||
527 | |||
528 | /* disable the queue */ | ||
529 | wr32(IGC_TXDCTL(reg_idx), 0); | ||
530 | wrfl(); | ||
531 | mdelay(10); | ||
532 | |||
533 | wr32(IGC_TDLEN(reg_idx), | ||
534 | ring->count * sizeof(union igc_adv_tx_desc)); | ||
535 | wr32(IGC_TDBAL(reg_idx), | ||
536 | tdba & 0x00000000ffffffffULL); | ||
537 | wr32(IGC_TDBAH(reg_idx), tdba >> 32); | ||
538 | |||
539 | ring->tail = adapter->io_addr + IGC_TDT(reg_idx); | ||
540 | wr32(IGC_TDH(reg_idx), 0); | ||
541 | writel(0, ring->tail); | ||
542 | |||
543 | txdctl |= IGC_TX_PTHRESH; | ||
544 | txdctl |= IGC_TX_HTHRESH << 8; | ||
545 | txdctl |= IGC_TX_WTHRESH << 16; | ||
546 | |||
547 | txdctl |= IGC_TXDCTL_QUEUE_ENABLE; | ||
548 | wr32(IGC_TXDCTL(reg_idx), txdctl); | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * igc_configure_tx - Configure transmit Unit after Reset | ||
553 | * @adapter: board private structure | ||
554 | * | ||
555 | * Configure the Tx unit of the MAC after a reset. | ||
556 | */ | ||
557 | static void igc_configure_tx(struct igc_adapter *adapter) | ||
558 | { | ||
559 | int i; | ||
560 | |||
561 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
562 | igc_configure_tx_ring(adapter, adapter->tx_ring[i]); | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * igc_setup_mrqc - configure the multiple receive queue control registers | ||
567 | * @adapter: Board private structure | ||
568 | */ | ||
569 | static void igc_setup_mrqc(struct igc_adapter *adapter) | ||
570 | { | ||
571 | } | ||
572 | |||
573 | /** | ||
574 | * igc_setup_rctl - configure the receive control registers | ||
575 | * @adapter: Board private structure | ||
576 | */ | ||
577 | static void igc_setup_rctl(struct igc_adapter *adapter) | ||
578 | { | ||
579 | struct igc_hw *hw = &adapter->hw; | ||
580 | u32 rctl; | ||
581 | |||
582 | rctl = rd32(IGC_RCTL); | ||
583 | |||
584 | rctl &= ~(3 << IGC_RCTL_MO_SHIFT); | ||
585 | rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); | ||
586 | |||
587 | rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | | ||
588 | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); | ||
589 | |||
590 | /* enable stripping of CRC. Newer features require | ||
591 | * that the HW strips the CRC. | ||
592 | */ | ||
593 | rctl |= IGC_RCTL_SECRC; | ||
594 | |||
595 | /* disable store bad packets and clear size bits. */ | ||
596 | rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); | ||
597 | |||
598 | /* enable LPE to allow for reception of jumbo frames */ | ||
599 | rctl |= IGC_RCTL_LPE; | ||
600 | |||
601 | /* disable queue 0 to prevent tail write w/o re-config */ | ||
602 | wr32(IGC_RXDCTL(0), 0); | ||
603 | |||
604 | /* This is useful for sniffing bad packets. */ | ||
605 | if (adapter->netdev->features & NETIF_F_RXALL) { | ||
606 | /* UPE and MPE will be handled by normal PROMISC logic | ||
607 | * in set_rx_mode | ||
608 | */ | ||
609 | rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ | ||
610 | IGC_RCTL_BAM | /* RX All Bcast Pkts */ | ||
611 | IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | ||
612 | |||
613 | rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ | ||
614 | IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ | ||
615 | } | ||
616 | |||
617 | wr32(IGC_RCTL, rctl); | ||
618 | } | ||
619 | |||
620 | /** | ||
621 | * igc_setup_tctl - configure the transmit control registers | ||
622 | * @adapter: Board private structure | ||
623 | */ | ||
624 | static void igc_setup_tctl(struct igc_adapter *adapter) | ||
625 | { | ||
626 | struct igc_hw *hw = &adapter->hw; | ||
627 | u32 tctl; | ||
628 | |||
629 | /* disable queue 0 which could be enabled by default */ | ||
630 | wr32(IGC_TXDCTL(0), 0); | ||
631 | |||
632 | /* Program the Transmit Control Register */ | ||
633 | tctl = rd32(IGC_TCTL); | ||
634 | tctl &= ~IGC_TCTL_CT; | ||
635 | tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | | ||
636 | (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); | ||
637 | |||
638 | /* Enable transmits */ | ||
639 | tctl |= IGC_TCTL_EN; | ||
640 | |||
641 | wr32(IGC_TCTL, tctl); | ||
642 | } | ||
643 | |||
644 | /** | ||
122 | * igc_set_mac - Change the Ethernet Address of the NIC | 645 | * igc_set_mac - Change the Ethernet Address of the NIC |
123 | * @netdev: network interface device structure | 646 | * @netdev: network interface device structure |
124 | * @p: pointer to an address structure | 647 | * @p: pointer to an address structure |
@@ -150,6 +673,121 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, | |||
150 | return NETDEV_TX_OK; | 673 | return NETDEV_TX_OK; |
151 | } | 674 | } |
152 | 675 | ||
676 | static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) | ||
677 | { | ||
678 | return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0; | ||
679 | } | ||
680 | |||
681 | static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, | ||
682 | struct igc_rx_buffer *bi) | ||
683 | { | ||
684 | struct page *page = bi->page; | ||
685 | dma_addr_t dma; | ||
686 | |||
687 | /* since we are recycling buffers we should seldom need to alloc */ | ||
688 | if (likely(page)) | ||
689 | return true; | ||
690 | |||
691 | /* alloc new page for storage */ | ||
692 | page = dev_alloc_pages(igc_rx_pg_order(rx_ring)); | ||
693 | if (unlikely(!page)) { | ||
694 | rx_ring->rx_stats.alloc_failed++; | ||
695 | return false; | ||
696 | } | ||
697 | |||
698 | /* map page for use */ | ||
699 | dma = dma_map_page_attrs(rx_ring->dev, page, 0, | ||
700 | igc_rx_pg_size(rx_ring), | ||
701 | DMA_FROM_DEVICE, | ||
702 | IGC_RX_DMA_ATTR); | ||
703 | |||
704 | /* if mapping failed free memory back to system since | ||
705 | * there isn't much point in holding memory we can't use | ||
706 | */ | ||
707 | if (dma_mapping_error(rx_ring->dev, dma)) { | ||
708 | __free_page(page); | ||
709 | |||
710 | rx_ring->rx_stats.alloc_failed++; | ||
711 | return false; | ||
712 | } | ||
713 | |||
714 | bi->dma = dma; | ||
715 | bi->page = page; | ||
716 | bi->page_offset = igc_rx_offset(rx_ring); | ||
717 | bi->pagecnt_bias = 1; | ||
718 | |||
719 | return true; | ||
720 | } | ||
721 | |||
722 | /** | ||
723 | * igc_alloc_rx_buffers - Replace used receive buffers; packet split | ||
724 | * @adapter: address of board private structure | ||
725 | */ | ||
726 | static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) | ||
727 | { | ||
728 | union igc_adv_rx_desc *rx_desc; | ||
729 | u16 i = rx_ring->next_to_use; | ||
730 | struct igc_rx_buffer *bi; | ||
731 | u16 bufsz; | ||
732 | |||
733 | /* nothing to do */ | ||
734 | if (!cleaned_count) | ||
735 | return; | ||
736 | |||
737 | rx_desc = IGC_RX_DESC(rx_ring, i); | ||
738 | bi = &rx_ring->rx_buffer_info[i]; | ||
739 | i -= rx_ring->count; | ||
740 | |||
741 | bufsz = igc_rx_bufsz(rx_ring); | ||
742 | |||
743 | do { | ||
744 | if (!igc_alloc_mapped_page(rx_ring, bi)) | ||
745 | break; | ||
746 | |||
747 | /* sync the buffer for use by the device */ | ||
748 | dma_sync_single_range_for_device(rx_ring->dev, bi->dma, | ||
749 | bi->page_offset, bufsz, | ||
750 | DMA_FROM_DEVICE); | ||
751 | |||
752 | /* Refresh the desc even if buffer_addrs didn't change | ||
753 | * because each write-back erases this info. | ||
754 | */ | ||
755 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); | ||
756 | |||
757 | rx_desc++; | ||
758 | bi++; | ||
759 | i++; | ||
760 | if (unlikely(!i)) { | ||
761 | rx_desc = IGC_RX_DESC(rx_ring, 0); | ||
762 | bi = rx_ring->rx_buffer_info; | ||
763 | i -= rx_ring->count; | ||
764 | } | ||
765 | |||
766 | /* clear the length for the next_to_use descriptor */ | ||
767 | rx_desc->wb.upper.length = 0; | ||
768 | |||
769 | cleaned_count--; | ||
770 | } while (cleaned_count); | ||
771 | |||
772 | i += rx_ring->count; | ||
773 | |||
774 | if (rx_ring->next_to_use != i) { | ||
775 | /* record the next descriptor to use */ | ||
776 | rx_ring->next_to_use = i; | ||
777 | |||
778 | /* update next to alloc since we have filled the ring */ | ||
779 | rx_ring->next_to_alloc = i; | ||
780 | |||
781 | /* Force memory writes to complete before letting h/w | ||
782 | * know there are new descriptors to fetch. (Only | ||
783 | * applicable for weak-ordered memory model archs, | ||
784 | * such as IA-64). | ||
785 | */ | ||
786 | wmb(); | ||
787 | writel(i, rx_ring->tail); | ||
788 | } | ||
789 | } | ||
790 | |||
153 | /** | 791 | /** |
154 | * igc_ioctl - I/O control method | 792 | * igc_ioctl - I/O control method |
155 | * @netdev: network interface device structure | 793 | * @netdev: network interface device structure |
@@ -189,6 +827,11 @@ static void igc_up(struct igc_adapter *adapter) | |||
189 | /* Clear any pending interrupts. */ | 827 | /* Clear any pending interrupts. */ |
190 | rd32(IGC_ICR); | 828 | rd32(IGC_ICR); |
191 | igc_irq_enable(adapter); | 829 | igc_irq_enable(adapter); |
830 | |||
831 | netif_tx_start_all_queues(adapter->netdev); | ||
832 | |||
833 | /* start the watchdog. */ | ||
834 | hw->mac.get_link_status = 1; | ||
192 | } | 835 | } |
193 | 836 | ||
194 | /** | 837 | /** |
@@ -287,7 +930,30 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev) | |||
287 | */ | 930 | */ |
288 | static void igc_configure(struct igc_adapter *adapter) | 931 | static void igc_configure(struct igc_adapter *adapter) |
289 | { | 932 | { |
933 | struct net_device *netdev = adapter->netdev; | ||
934 | int i = 0; | ||
935 | |||
290 | igc_get_hw_control(adapter); | 936 | igc_get_hw_control(adapter); |
937 | igc_set_rx_mode(netdev); | ||
938 | |||
939 | igc_setup_tctl(adapter); | ||
940 | igc_setup_mrqc(adapter); | ||
941 | igc_setup_rctl(adapter); | ||
942 | |||
943 | igc_configure_tx(adapter); | ||
944 | igc_configure_rx(adapter); | ||
945 | |||
946 | igc_rx_fifo_flush_base(&adapter->hw); | ||
947 | |||
948 | /* call igc_desc_unused which always leaves | ||
949 | * at least 1 descriptor unused to make sure | ||
950 | * next_to_use != next_to_clean | ||
951 | */ | ||
952 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
953 | struct igc_ring *ring = adapter->rx_ring[i]; | ||
954 | |||
955 | igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); | ||
956 | } | ||
291 | } | 957 | } |
292 | 958 | ||
293 | /** | 959 | /** |
@@ -336,6 +1002,19 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter) | |||
336 | } | 1002 | } |
337 | 1003 | ||
338 | /** | 1004 | /** |
1005 | * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set | ||
1006 | * @netdev: network interface device structure | ||
1007 | * | ||
1008 | * The set_rx_mode entry point is called whenever the unicast or multicast | ||
1009 | * address lists or the network interface flags are updated. This routine is | ||
1010 | * responsible for configuring the hardware for proper unicast, multicast, | ||
1011 | * promiscuous mode, and all-multi behavior. | ||
1012 | */ | ||
1013 | static void igc_set_rx_mode(struct net_device *netdev) | ||
1014 | { | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
339 | * igc_msix_other - msix other interrupt handler | 1018 | * igc_msix_other - msix other interrupt handler |
340 | * @irq: interrupt number | 1019 | * @irq: interrupt number |
341 | * @data: pointer to a q_vector | 1020 | * @data: pointer to a q_vector |
@@ -784,6 +1463,83 @@ static void igc_update_itr(struct igc_q_vector *q_vector, | |||
784 | ring_container->itr = itrval; | 1463 | ring_container->itr = itrval; |
785 | } | 1464 | } |
786 | 1465 | ||
1466 | /** | ||
1467 | * igc_intr_msi - Interrupt Handler | ||
1468 | * @irq: interrupt number | ||
1469 | * @data: pointer to a network interface device structure | ||
1470 | */ | ||
1471 | static irqreturn_t igc_intr_msi(int irq, void *data) | ||
1472 | { | ||
1473 | struct igc_adapter *adapter = data; | ||
1474 | struct igc_q_vector *q_vector = adapter->q_vector[0]; | ||
1475 | struct igc_hw *hw = &adapter->hw; | ||
1476 | /* read ICR disables interrupts using IAM */ | ||
1477 | u32 icr = rd32(IGC_ICR); | ||
1478 | |||
1479 | igc_write_itr(q_vector); | ||
1480 | |||
1481 | if (icr & IGC_ICR_DRSTA) | ||
1482 | schedule_work(&adapter->reset_task); | ||
1483 | |||
1484 | if (icr & IGC_ICR_DOUTSYNC) { | ||
1485 | /* HW is reporting DMA is out of sync */ | ||
1486 | adapter->stats.doosync++; | ||
1487 | } | ||
1488 | |||
1489 | if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { | ||
1490 | hw->mac.get_link_status = 1; | ||
1491 | if (!test_bit(__IGC_DOWN, &adapter->state)) | ||
1492 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1493 | } | ||
1494 | |||
1495 | napi_schedule(&q_vector->napi); | ||
1496 | |||
1497 | return IRQ_HANDLED; | ||
1498 | } | ||
1499 | |||
1500 | /** | ||
1501 | * igc_intr - Legacy Interrupt Handler | ||
1502 | * @irq: interrupt number | ||
1503 | * @data: pointer to a network interface device structure | ||
1504 | */ | ||
1505 | static irqreturn_t igc_intr(int irq, void *data) | ||
1506 | { | ||
1507 | struct igc_adapter *adapter = data; | ||
1508 | struct igc_q_vector *q_vector = adapter->q_vector[0]; | ||
1509 | struct igc_hw *hw = &adapter->hw; | ||
1510 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | ||
1511 | * need for the IMC write | ||
1512 | */ | ||
1513 | u32 icr = rd32(IGC_ICR); | ||
1514 | |||
1515 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | ||
1516 | * not set, then the adapter didn't send an interrupt | ||
1517 | */ | ||
1518 | if (!(icr & IGC_ICR_INT_ASSERTED)) | ||
1519 | return IRQ_NONE; | ||
1520 | |||
1521 | igc_write_itr(q_vector); | ||
1522 | |||
1523 | if (icr & IGC_ICR_DRSTA) | ||
1524 | schedule_work(&adapter->reset_task); | ||
1525 | |||
1526 | if (icr & IGC_ICR_DOUTSYNC) { | ||
1527 | /* HW is reporting DMA is out of sync */ | ||
1528 | adapter->stats.doosync++; | ||
1529 | } | ||
1530 | |||
1531 | if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { | ||
1532 | hw->mac.get_link_status = 1; | ||
1533 | /* guard against interrupt when we're going down */ | ||
1534 | if (!test_bit(__IGC_DOWN, &adapter->state)) | ||
1535 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
1536 | } | ||
1537 | |||
1538 | napi_schedule(&q_vector->napi); | ||
1539 | |||
1540 | return IRQ_HANDLED; | ||
1541 | } | ||
1542 | |||
787 | static void igc_set_itr(struct igc_q_vector *q_vector) | 1543 | static void igc_set_itr(struct igc_q_vector *q_vector) |
788 | { | 1544 | { |
789 | struct igc_adapter *adapter = q_vector->adapter; | 1545 | struct igc_adapter *adapter = q_vector->adapter; |
@@ -1147,6 +1903,29 @@ err_out: | |||
1147 | } | 1903 | } |
1148 | 1904 | ||
1149 | /** | 1905 | /** |
1906 | * igc_cache_ring_register - Descriptor ring to register mapping | ||
1907 | * @adapter: board private structure to initialize | ||
1908 | * | ||
1909 | * Once we know the feature-set enabled for the device, we'll cache | ||
1910 | * the register offset the descriptor ring is assigned to. | ||
1911 | */ | ||
1912 | static void igc_cache_ring_register(struct igc_adapter *adapter) | ||
1913 | { | ||
1914 | int i = 0, j = 0; | ||
1915 | |||
1916 | switch (adapter->hw.mac.type) { | ||
1917 | case igc_i225: | ||
1918 | /* Fall through */ | ||
1919 | default: | ||
1920 | for (; i < adapter->num_rx_queues; i++) | ||
1921 | adapter->rx_ring[i]->reg_idx = i; | ||
1922 | for (; j < adapter->num_tx_queues; j++) | ||
1923 | adapter->tx_ring[j]->reg_idx = j; | ||
1924 | break; | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | /** | ||
1150 | * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors | 1929 | * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors |
1151 | * @adapter: Pointer to adapter structure | 1930 | * @adapter: Pointer to adapter structure |
1152 | * | 1931 | * |
@@ -1165,6 +1944,8 @@ static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) | |||
1165 | goto err_alloc_q_vectors; | 1944 | goto err_alloc_q_vectors; |
1166 | } | 1945 | } |
1167 | 1946 | ||
1947 | igc_cache_ring_register(adapter); | ||
1948 | |||
1168 | return 0; | 1949 | return 0; |
1169 | 1950 | ||
1170 | err_alloc_q_vectors: | 1951 | err_alloc_q_vectors: |
@@ -1252,6 +2033,8 @@ static void igc_irq_enable(struct igc_adapter *adapter) | |||
1252 | */ | 2033 | */ |
1253 | static int igc_request_irq(struct igc_adapter *adapter) | 2034 | static int igc_request_irq(struct igc_adapter *adapter) |
1254 | { | 2035 | { |
2036 | struct net_device *netdev = adapter->netdev; | ||
2037 | struct pci_dev *pdev = adapter->pdev; | ||
1255 | int err = 0; | 2038 | int err = 0; |
1256 | 2039 | ||
1257 | if (adapter->flags & IGC_FLAG_HAS_MSIX) { | 2040 | if (adapter->flags & IGC_FLAG_HAS_MSIX) { |
@@ -1259,14 +2042,38 @@ static int igc_request_irq(struct igc_adapter *adapter) | |||
1259 | if (!err) | 2042 | if (!err) |
1260 | goto request_done; | 2043 | goto request_done; |
1261 | /* fall back to MSI */ | 2044 | /* fall back to MSI */ |
2045 | igc_free_all_tx_resources(adapter); | ||
2046 | igc_free_all_rx_resources(adapter); | ||
1262 | 2047 | ||
1263 | igc_clear_interrupt_scheme(adapter); | 2048 | igc_clear_interrupt_scheme(adapter); |
1264 | err = igc_init_interrupt_scheme(adapter, false); | 2049 | err = igc_init_interrupt_scheme(adapter, false); |
1265 | if (err) | 2050 | if (err) |
1266 | goto request_done; | 2051 | goto request_done; |
2052 | igc_setup_all_tx_resources(adapter); | ||
2053 | igc_setup_all_rx_resources(adapter); | ||
1267 | igc_configure(adapter); | 2054 | igc_configure(adapter); |
1268 | } | 2055 | } |
1269 | 2056 | ||
2057 | igc_assign_vector(adapter->q_vector[0], 0); | ||
2058 | |||
2059 | if (adapter->flags & IGC_FLAG_HAS_MSI) { | ||
2060 | err = request_irq(pdev->irq, &igc_intr_msi, 0, | ||
2061 | netdev->name, adapter); | ||
2062 | if (!err) | ||
2063 | goto request_done; | ||
2064 | |||
2065 | /* fall back to legacy interrupts */ | ||
2066 | igc_reset_interrupt_capability(adapter); | ||
2067 | adapter->flags &= ~IGC_FLAG_HAS_MSI; | ||
2068 | } | ||
2069 | |||
2070 | err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, | ||
2071 | netdev->name, adapter); | ||
2072 | |||
2073 | if (err) | ||
2074 | dev_err(&pdev->dev, "Error %d getting interrupt\n", | ||
2075 | err); | ||
2076 | |||
1270 | request_done: | 2077 | request_done: |
1271 | return err; | 2078 | return err; |
1272 | } | 2079 | } |
@@ -1315,6 +2122,16 @@ static int __igc_open(struct net_device *netdev, bool resuming) | |||
1315 | 2122 | ||
1316 | netif_carrier_off(netdev); | 2123 | netif_carrier_off(netdev); |
1317 | 2124 | ||
2125 | /* allocate transmit descriptors */ | ||
2126 | err = igc_setup_all_tx_resources(adapter); | ||
2127 | if (err) | ||
2128 | goto err_setup_tx; | ||
2129 | |||
2130 | /* allocate receive descriptors */ | ||
2131 | err = igc_setup_all_rx_resources(adapter); | ||
2132 | if (err) | ||
2133 | goto err_setup_rx; | ||
2134 | |||
1318 | igc_power_up_link(adapter); | 2135 | igc_power_up_link(adapter); |
1319 | 2136 | ||
1320 | igc_configure(adapter); | 2137 | igc_configure(adapter); |
@@ -1341,6 +2158,8 @@ static int __igc_open(struct net_device *netdev, bool resuming) | |||
1341 | rd32(IGC_ICR); | 2158 | rd32(IGC_ICR); |
1342 | igc_irq_enable(adapter); | 2159 | igc_irq_enable(adapter); |
1343 | 2160 | ||
2161 | netif_tx_start_all_queues(netdev); | ||
2162 | |||
1344 | /* start the watchdog. */ | 2163 | /* start the watchdog. */ |
1345 | hw->mac.get_link_status = 1; | 2164 | hw->mac.get_link_status = 1; |
1346 | 2165 | ||
@@ -1351,6 +2170,11 @@ err_set_queues: | |||
1351 | err_req_irq: | 2170 | err_req_irq: |
1352 | igc_release_hw_control(adapter); | 2171 | igc_release_hw_control(adapter); |
1353 | igc_power_down_link(adapter); | 2172 | igc_power_down_link(adapter); |
2173 | igc_free_all_rx_resources(adapter); | ||
2174 | err_setup_rx: | ||
2175 | igc_free_all_tx_resources(adapter); | ||
2176 | err_setup_tx: | ||
2177 | igc_reset(adapter); | ||
1354 | 2178 | ||
1355 | return err; | 2179 | return err; |
1356 | } | 2180 | } |
@@ -1383,6 +2207,9 @@ static int __igc_close(struct net_device *netdev, bool suspending) | |||
1383 | 2207 | ||
1384 | igc_free_irq(adapter); | 2208 | igc_free_irq(adapter); |
1385 | 2209 | ||
2210 | igc_free_all_tx_resources(adapter); | ||
2211 | igc_free_all_rx_resources(adapter); | ||
2212 | |||
1386 | return 0; | 2213 | return 0; |
1387 | } | 2214 | } |
1388 | 2215 | ||