Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--   drivers/net/ixgbe/ixgbe_main.c | 1708
1 file changed, 1289 insertions(+), 419 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 07e778d3e5d2..a551a96ce676 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -39,6 +39,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -47,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                               "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.8-k2"
+#define DRV_VERSION "2.0.34-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
@@ -89,6 +90,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
+	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
 	 board_82599 },
 
@@ -183,6 +186,22 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 	}
 }
 
+static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+                                          u64 qmask)
+{
+	u32 mask;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+	}
+}
+
 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                              struct ixgbe_tx_buffer
                                              *tx_buffer_info)
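The new ixgbe_irq_rearm_queues() helper hides a register-layout difference between MACs: 82598 keeps all queue-interrupt bits in a single 32-bit EICS register, while 82599 spreads a 64-bit queue mask across EICS_EX(0) and EICS_EX(1). A minimal standalone sketch of the 64-to-2x32 split (the vector index is an invented example, not a value from this patch):

    /* Illustrative only: splitting a 64-bit queue mask into 32-bit halves. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int v_idx = 40;                    /* hypothetical MSI-X vector */
            uint64_t qmask = (uint64_t)1 << v_idx;

            uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF); /* -> EICS_EX(0) */
            uint32_t hi = (uint32_t)(qmask >> 32);        /* -> EICS_EX(1) */

            /* prints lo=0x00000000 hi=0x00000100 */
            printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
            return 0;
    }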
@@ -245,14 +264,13 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
- *
- * returns true if transmit work is done
 **/
-static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
+static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *tx_ring)
 {
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
 	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -275,12 +293,24 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
 		if (cleaned && skb) {
 			unsigned int segs, bytecount;
+			unsigned int hlen = skb_headlen(skb);
 
 			/* gso_segs is currently only valid for tcp */
 			segs = skb_shinfo(skb)->gso_segs ?: 1;
+#ifdef IXGBE_FCOE
+			/* adjust for FCoE Sequence Offload */
+			if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+			    && (skb->protocol == htons(ETH_P_FCOE)) &&
+			    skb_is_gso(skb)) {
+				hlen = skb_transport_offset(skb) +
+					sizeof(struct fc_frame_header) +
+					sizeof(struct fcoe_crc_eof);
+				segs = DIV_ROUND_UP(skb->len - hlen,
+					skb_shinfo(skb)->gso_size);
+			}
+#endif /* IXGBE_FCOE */
 			/* multiply data chunks by size of headers */
-			bytecount = ((segs - 1) * skb_headlen(skb)) +
-			            skb->len;
+			bytecount = ((segs - 1) * hlen) + skb->len;
 			total_packets += segs;
 			total_bytes += bytecount;
 		}
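The accounting mirrors TSO: each of the segs wire frames repeats the hlen header bytes, so the on-wire byte count is (segs - 1) * hlen + skb->len. A hedged numeric sketch of the FCoE Sequence Offload case (all sizes below are invented for illustration, not taken from the patch):

    /* Illustrative only: FCoE SO segment/byte accounting with made-up sizes. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int skb_len = 10000; /* bytes handed to the NIC (example) */
            unsigned int hlen = 58;       /* transport off + FC hdr + CRC/EOF (example) */
            unsigned int gso_size = 2048; /* payload per FC frame (example) */

            unsigned int segs = DIV_ROUND_UP(skb_len - hlen, gso_size); /* 5 */
            unsigned int bytecount = (segs - 1) * hlen + skb_len;       /* 10232 */

            printf("segs=%u bytecount=%u\n", segs, bytecount);
            return 0;
    }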
@@ -327,7 +357,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 
 	/* re-arm the interrupt */
 	if (count >= tx_ring->work_limit)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
+		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
 
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
@@ -398,6 +428,9 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
 		return;
 
+	/* always use CB2 mode, difference is masked in the CB driver */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		adapter->tx_ring[i].cpu = -1;
 		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
@@ -419,9 +452,6 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 		/* if we're already enabled, don't do it again */
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
 			break;
-		/* Always use CB2 mode, difference is masked
-		 * in the CB driver. */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 		if (dca_add_requester(dev) == 0) {
 			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
 			ixgbe_setup_dca(adapter);
@@ -451,6 +481,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                               struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
                               union ixgbe_adv_rx_desc *rx_desc)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -458,24 +489,17 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
 	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
 	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-	skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+	skb_record_rx_queue(skb, ring->queue_index);
+	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
 		if (adapter->vlgrp && is_vlan && (tag != 0))
 			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
 		else
 			napi_gro_receive(napi, skb);
 	} else {
-		if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
-			if (adapter->vlgrp && is_vlan && (tag != 0))
-				vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
-			else
-				netif_receive_skb(skb);
-		} else {
-			if (adapter->vlgrp && is_vlan && (tag != 0))
-				vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
-			else
-				netif_rx(skb);
-		}
+		if (adapter->vlgrp && is_vlan && (tag != 0))
+			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+		else
+			netif_rx(skb);
 	}
 }
 
@@ -622,6 +646,40 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 }
 
+static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+	        IXGBE_RXDADV_RSCCNT_MASK) >>
+	        IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_transform_rsc_queue - change rsc queue into a full packet
+ * @skb: pointer to the last skb in the rsc queue
+ *
+ * This function changes a queue full of hw rsc buffers into a completed
+ * packet.  It uses the ->prev pointers to find the first packet and then
+ * turns it into the frag list owner.
+ **/
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
+{
+	unsigned int frag_list_size = 0;
+
+	while (skb->prev) {
+		struct sk_buff *prev = skb->prev;
+		frag_list_size += skb->len;
+		skb->prev = NULL;
+		skb = prev;
+	}
+
+	skb_shinfo(skb)->frag_list = skb->next;
+	skb->next = NULL;
+	skb->len += frag_list_size;
+	skb->data_len += frag_list_size;
+	skb->truesize += frag_list_size;
+	return skb;
+}
+
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                struct ixgbe_ring *rx_ring,
                                int *work_done, int work_to_do)
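The RSC path links partial buffers through skb->next/skb->prev as they arrive and only assembles them when the EOP descriptor completes. A toy model of the ->prev walk (plain C with a stub struct, not kernel code) shows how the tail of the chain is folded back into the head, which then owns the rest as a fragment list:

    /* Toy model of ixgbe_transform_rsc_queue(): walk ->prev from the last
     * buffer to the first; the first becomes the owner of the whole packet. */
    #include <stdio.h>

    struct buf { struct buf *next, *prev; unsigned int len; };

    static struct buf *transform(struct buf *last)
    {
            unsigned int tail_len = 0;

            while (last->prev) {              /* walk back toward the head */
                    struct buf *prev = last->prev;
                    tail_len += last->len;
                    last->prev = NULL;
                    last = prev;
            }
            last->len += tail_len;   /* head now covers the whole packet  */
            return last;             /* chained bufs remain on head->next */
    }

    int main(void)
    {
            struct buf a = { .len = 1500 }, b = { .len = 1500 }, c = { .len = 800 };
            a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

            printf("total len = %u\n", transform(&c)->len);   /* 3800 */
            return 0;
    }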
@@ -631,12 +689,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i;
+	unsigned int i, rsc_count = 0;
 	u32 len, staterr;
 	u16 hdr_info;
 	bool cleaned = false;
 	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+#ifdef IXGBE_FCOE
+	int ddp_bytes = 0;
+#endif /* IXGBE_FCOE */
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
@@ -667,7 +728,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(skb->data - NET_IP_ALIGN);
 		rx_buffer_info->skb = NULL;
 
-		if (len && !skb_shinfo(skb)->nr_frags) {
+		if (rx_buffer_info->dma) {
 			pci_unmap_single(pdev, rx_buffer_info->dma,
 			                 rx_ring->rx_buf_len,
 			                 PCI_DMA_FROMDEVICE);
@@ -697,20 +758,38 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		next_buffer = &rx_ring->rx_buffer_info[i];
 
 		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
 		prefetch(next_rxd);
-
 		cleaned_count++;
+
+		if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
+			rsc_count = ixgbe_get_rsc_count(rx_desc);
+
+		if (rsc_count) {
+			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+				     IXGBE_RXDADV_NEXTP_SHIFT;
+			next_buffer = &rx_ring->rx_buffer_info[nextp];
+			rx_ring->rsc_count += (rsc_count - 1);
+		} else {
+			next_buffer = &rx_ring->rx_buffer_info[i];
+		}
+
 		if (staterr & IXGBE_RXD_STAT_EOP) {
+			if (skb->prev)
+				skb = ixgbe_transform_rsc_queue(skb);
 			rx_ring->stats.packets++;
 			rx_ring->stats.bytes += skb->len;
 		} else {
-			rx_buffer_info->skb = next_buffer->skb;
-			rx_buffer_info->dma = next_buffer->dma;
-			next_buffer->skb = skb;
-			next_buffer->dma = 0;
+			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+				rx_buffer_info->skb = next_buffer->skb;
+				rx_buffer_info->dma = next_buffer->dma;
+				next_buffer->skb = skb;
+				next_buffer->dma = 0;
+			} else {
+				skb->next = next_buffer->skb;
+				skb->next->prev = skb;
+			}
 			adapter->non_eop_descs++;
 			goto next_desc;
 		}
@@ -727,7 +806,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 
 		skb->protocol = eth_type_trans(skb, adapter->netdev);
-		ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
+#ifdef IXGBE_FCOE
+		/* if ddp, not passing to ULD unless for FCP_RSP or error */
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+			if (!ddp_bytes)
+				goto next_desc;
+		}
+#endif /* IXGBE_FCOE */
+		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -740,7 +827,7 @@ next_desc:
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		rx_buffer_info = next_buffer;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
@@ -751,6 +838,21 @@ next_desc:
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+#ifdef IXGBE_FCOE
+	/* include DDPed FCoE data */
+	if (ddp_bytes > 0) {
+		unsigned int mss;
+
+		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+			sizeof(struct fc_frame_header) -
+			sizeof(struct fcoe_crc_eof);
+		if (mss > 512)
+			mss &= ~511;
+		total_rx_bytes += ddp_bytes;
+		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
+	}
+#endif /* IXGBE_FCOE */
+
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
 	adapter->net_stats.rx_bytes += total_rx_bytes;
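DDPed frames never reach the stack as skbs, so the cleanup loop estimates the packet count from byte totals. The mss &= ~511 step rounds the derived per-frame payload down to a multiple of 512 before the DIV_ROUND_UP. A hedged numeric sketch (the MTU and header overhead below are invented examples; the real code derives the overhead from the FCoE struct sizes):

    /* Illustrative only: FCoE DDP packet-count estimate with example sizes. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int mtu = 2500;          /* example netdev MTU            */
            unsigned int overhead = 46;       /* FCoE + FC hdr + CRC/EOF (example) */
            unsigned int ddp_bytes = 1 << 20; /* 1 MiB placed directly by DDP  */

            unsigned int mss = mtu - overhead;   /* 2454 */
            if (mss > 512)
                    mss &= ~511;                 /* round down: 2454 -> 2048 */

            printf("packets ~= %u\n", DIV_ROUND_UP(ddp_bytes, mss)); /* 512 */
            return 0;
    }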
@@ -780,7 +882,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	 * corresponding register.
 	 */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-		q_vector = &adapter->q_vector[v_idx];
+		q_vector = adapter->q_vector[v_idx];
 		/* XXX for_each_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
 		                       adapter->num_rx_queues);
@@ -810,12 +912,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 			/* rx only */
 			q_vector->eitr = adapter->eitr_param;
 
-		/*
-		 * since this is initial set up don't need to call
-		 * ixgbe_write_eitr helper
-		 */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
-		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
+		ixgbe_write_eitr(q_vector);
 	}
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -900,17 +997,19 @@ update_itr_done:
 
 /**
  * ixgbe_write_eitr - write EITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
  *
  * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
-void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
+void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 {
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_hw *hw = &adapter->hw;
+	int v_idx = q_vector->v_idx;
+	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
+
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 		/* must write high and low 16 bits to reset counter */
 		itr_reg |= (itr_reg << 16);
@@ -929,8 +1028,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	u32 new_itr;
 	u8 current_itr, ret_itr;
-	int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-	                       sizeof(struct ixgbe_q_vector);
+	int i, r_idx;
 	struct ixgbe_ring *rx_ring, *tx_ring;
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -980,14 +1078,13 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	}
 
 	if (new_itr != q_vector->eitr) {
-		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
 		/* save the algorithm value here, not the smoothed one */
 		q_vector->eitr = new_itr;
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-		ixgbe_write_eitr(adapter, v_idx, itr_reg);
+
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return;
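Both ITR paths now compute the smoothing before storing the value, and ixgbe_write_eitr() converts interrupts/sec to register format itself. The blend is a simple 90/10 integer exponential smoothing; a standalone sketch with invented sample values:

    /* Illustrative only: the 90/10 exponential smoothing of the ITR value. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int eitr = 8000;     /* current interrupts/sec setting   */
            unsigned int new_itr = 20000; /* rate suggested by the ITR update */

            /* do an exponential smoothing, as in ixgbe_set_itr{,_msix}() */
            new_itr = ((eitr * 90) / 100) + ((new_itr * 10) / 100);

            printf("smoothed = %u\n", new_itr);   /* 7200 + 2000 = 9200 */
            return 0;
    }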
@@ -1058,14 +1155,64 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		ixgbe_check_fan_failure(adapter, eicr);
 
-	if (hw->mac.type == ixgbe_mac_82599EB)
+	if (hw->mac.type == ixgbe_mac_82599EB) {
 		ixgbe_check_sfp_event(adapter, eicr);
+
+		/* Handle Flow Director Full threshold interrupt */
+		if (eicr & IXGBE_EICR_FLOW_DIR) {
+			int i;
+			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+			/* Disable transmits before FDIR Re-initialization */
+			netif_tx_stop_all_queues(netdev);
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				struct ixgbe_ring *tx_ring =
+				                           &adapter->tx_ring[i];
+				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
+				                       &tx_ring->reinit_state))
+					schedule_work(&adapter->fdir_reinit_task);
+			}
+		}
+	}
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
 	return IRQ_HANDLED;
 }
 
+static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
+                                           u64 qmask)
+{
+	u32 mask;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+	}
+	/* skip the flush */
+}
+
+static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
+                                            u64 qmask)
+{
+	u32 mask;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+	} else {
+		mask = (qmask & 0xFFFFFFFF);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+		mask = (qmask >> 32);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+	}
+	/* skip the flush */
+}
+
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
 	struct ixgbe_q_vector *q_vector = data;
@@ -1079,17 +1226,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = &(adapter->tx_ring[r_idx]);
-#ifdef CONFIG_IXGBE_DCA
-		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-			ixgbe_update_tx_dca(adapter, tx_ring);
-#endif
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
-		ixgbe_clean_tx_irq(adapter, tx_ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 		                      r_idx + 1);
 	}
 
+	/* disable interrupts on this vector only */
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	napi_schedule(&q_vector->napi);
+
 	return IRQ_HANDLED;
 }
 
@@ -1121,7 +1267,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -1129,8 +1275,36 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 {
-	ixgbe_msix_clean_rx(irq, data);
-	ixgbe_msix_clean_tx(irq, data);
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *ring;
+	int r_idx;
+	int i;
+
+	if (!q_vector->txr_count && !q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		ring = &(adapter->tx_ring[r_idx]);
+		ring->total_bytes = 0;
+		ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		                      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		ring = &(adapter->rx_ring[r_idx]);
+		ring->total_bytes = 0;
+		ring->total_packets = 0;
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		                      r_idx + 1);
+	}
+
+	/* disable interrupts on this vector only */
+	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
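After this change every MSI-X queue handler follows the same pattern: reset the per-ring byte/packet counters, mask only this vector's queues, and defer all cleaning to NAPI. Reduced to its skeleton in a standalone toy with stub types (a sketch of the shape, not literal driver code):

    /* Toy skeleton (userspace, stub types) of the shared MSI-X fastpath. */
    #include <stdint.h>
    #include <stdio.h>

    struct q_vector { int v_idx; int scheduled; };

    static void irq_disable_queues(uint64_t qmask)
    {
            printf("mask queues 0x%016llx\n", (unsigned long long)qmask);
    }

    static void napi_schedule_stub(struct q_vector *qv) { qv->scheduled = 1; }

    static int vector_isr(struct q_vector *qv)
    {
            /* the real handler also zeroes ring->total_bytes/packets here */
            irq_disable_queues((uint64_t)1 << qv->v_idx); /* this vector only */
            napi_schedule_stub(qv);                       /* poll does the work */
            return 1;                                     /* IRQ_HANDLED */
    }

    int main(void)
    {
            struct q_vector qv = { .v_idx = 3 };
            vector_isr(&qv);
            return qv.scheduled ? 0 : 1;
    }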
@@ -1167,29 +1341,42 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 		if (adapter->itr_setting & 1)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
+			ixgbe_irq_enable_queues(adapter,
+			                        ((u64)1 << q_vector->v_idx));
 	}
 
 	return work_done;
 }
 
 /**
- * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
  * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
-static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
 	                       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct ixgbe_ring *rx_ring = NULL;
+	struct ixgbe_ring *ring = NULL;
 	int work_done = 0, i;
 	long r_idx;
-	u16 enable_mask = 0;
+	bool tx_clean_complete = true;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+			ixgbe_update_tx_dca(adapter, ring);
+#endif
+		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		                      r_idx + 1);
+	}
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
@@ -1197,47 +1384,87 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
 	budget = max(budget, 1);
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+		ring = &(adapter->rx_ring[r_idx]);
 #ifdef CONFIG_IXGBE_DCA
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-			ixgbe_update_rx_dca(adapter, rx_ring);
+			ixgbe_update_rx_dca(adapter, ring);
 #endif
-		ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-		enable_mask |= rx_ring->v_idx;
+		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 		                      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
+	ring = &(adapter->rx_ring[r_idx]);
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->itr_setting & 1)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
+			ixgbe_irq_enable_queues(adapter,
+			                        ((u64)1 << q_vector->v_idx));
 		return 0;
 	}
 
 	return work_done;
 }
+
+/**
+ * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
+{
+	struct ixgbe_q_vector *q_vector =
+	                       container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *tx_ring = NULL;
+	int work_done = 0;
+	long r_idx;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	tx_ring = &(adapter->tx_ring[r_idx]);
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_tx_dca(adapter, tx_ring);
+#endif
+
+	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
+		work_done = budget;
+
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		napi_complete(napi);
+		if (adapter->itr_setting & 1)
+			ixgbe_set_itr_msix(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+	}
+
+	return work_done;
+}
+
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
-	a->q_vector[v_idx].adapter = a;
-	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
-	a->q_vector[v_idx].rxr_count++;
-	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(r_idx, q_vector->rxr_idx);
+	q_vector->rxr_count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int r_idx)
+                                     int t_idx)
 {
-	a->q_vector[v_idx].adapter = a;
-	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
-	a->q_vector[v_idx].txr_count++;
-	a->tx_ring[r_idx].v_idx = 1 << v_idx;
+	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+	set_bit(t_idx, q_vector->txr_idx);
+	q_vector->txr_count++;
 }
 
 /**
@@ -1333,7 +1560,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                          &ixgbe_msix_clean_many)
 	for (vector = 0; vector < q_vectors; vector++) {
-		handler = SET_HANDLER(&adapter->q_vector[vector]);
+		handler = SET_HANDLER(adapter->q_vector[vector]);
 
 		if(handler == &ixgbe_msix_clean_rx) {
 			sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1349,7 +1576,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 		err = request_irq(adapter->msix_entries[vector].vector,
 		                  handler, 0, adapter->name[vector],
-		                  &(adapter->q_vector[vector]));
+		                  adapter->q_vector[vector]);
 		if (err) {
 			DPRINTK(PROBE, ERR,
 			        "request_irq failed for MSIX interrupt "
@@ -1372,7 +1599,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
 	for (i = vector - 1; i >= 0; i--)
 		free_irq(adapter->msix_entries[--vector].vector,
-		         &(adapter->q_vector[i]));
+		         adapter->q_vector[i]);
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
@@ -1383,7 +1610,7 @@ out:
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
-	struct ixgbe_q_vector *q_vector = adapter->q_vector;
+	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u8 current_itr;
 	u32 new_itr = q_vector->eitr;
 	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1416,14 +1643,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 	}
 
 	if (new_itr != q_vector->eitr) {
-		u32 itr_reg;
+		/* do an exponential smoothing */
+		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
 
 		/* save the algorithm value here, not the smoothed one */
 		q_vector->eitr = new_itr;
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-		ixgbe_write_eitr(adapter, 0, itr_reg);
+
+		ixgbe_write_eitr(q_vector);
 	}
 
 	return;
@@ -1436,7 +1662,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
 	u32 mask;
-	mask = IXGBE_EIMS_ENABLE_MASK;
+
+	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 		mask |= IXGBE_EIMS_GPI_SDP1;
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1444,16 +1671,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
 	}
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		mask |= IXGBE_EIMS_FLOW_DIR;
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		/* enable the rest of the queue vectors */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
-		                (IXGBE_EIMS_RTX_QUEUE << 16));
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-		                ((IXGBE_EIMS_RTX_QUEUE << 16) |
-		                 IXGBE_EIMS_RTX_QUEUE));
-	}
+	ixgbe_irq_enable_queues(adapter, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
@@ -1467,6 +1690,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	struct net_device *netdev = data;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
 
 	/*
@@ -1494,13 +1718,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
 	ixgbe_check_fan_failure(adapter, eicr);
 
-	if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
+	if (napi_schedule_prep(&(q_vector->napi))) {
 		adapter->tx_ring[0].total_packets = 0;
 		adapter->tx_ring[0].total_bytes = 0;
 		adapter->rx_ring[0].total_packets = 0;
 		adapter->rx_ring[0].total_bytes = 0;
 		/* would disable interrupts here but EIAM disabled it */
-		__napi_schedule(&adapter->q_vector[0].napi);
+		__napi_schedule(&(q_vector->napi));
 	}
 
 	return IRQ_HANDLED;
@@ -1511,7 +1735,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	for (i = 0; i < q_vectors; i++) {
-		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
 		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
 		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
 		q_vector->rxr_count = 0;
@@ -1562,7 +1786,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		i--;
 		for (; i >= 0; i--) {
 			free_irq(adapter->msix_entries[i].vector,
-			         &(adapter->q_vector[i]));
+			         adapter->q_vector[i]);
 		}
 
 		ixgbe_reset_q_vectors(adapter);
@@ -1577,10 +1801,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+	} else {
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1592,18 +1818,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 	}
 }
 
-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
-{
-	u32 mask = IXGBE_EIMS_RTX_QUEUE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
-		                (mask << 16 | mask));
-	}
-	/* skip the flush */
-}
-
 /**
  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
  *
@@ -1673,11 +1887,34 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 	u32 srrctl;
 	int queue0 = 0;
 	unsigned long mask;
+	struct ixgbe_ring_feature *feature = adapter->ring_feature;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		queue0 = index;
+		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+			int dcb_i = feature[RING_F_DCB].indices;
+			if (dcb_i == 8)
+				queue0 = index >> 4;
+			else if (dcb_i == 4)
+				queue0 = index >> 5;
+			else
+				dev_err(&adapter->pdev->dev, "Invalid DCB "
+				        "configuration\n");
+#ifdef IXGBE_FCOE
+			if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+				struct ixgbe_ring_feature *f;
+
+				rx_ring = &adapter->rx_ring[queue0];
+				f = &adapter->ring_feature[RING_F_FCOE];
+				if ((queue0 == 0) && (index > rx_ring->reg_idx))
+					queue0 = f->mask + index -
+					         rx_ring->reg_idx - 1;
+			}
+#endif /* IXGBE_FCOE */
+		} else {
+			queue0 = index;
+		}
 	} else {
-		mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
+		mask = (unsigned long) feature[RING_F_RSS].mask;
 		queue0 = index & mask;
 		index = index & mask;
 	}
@@ -1689,33 +1926,55 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 
+	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+	          IXGBE_SRRCTL_BSIZEHDR_MASK;
+
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-		u16 bufsz = IXGBE_RXBUFFER_2048;
-		/* grow the amount we can receive on large page machines */
-		if (bufsz < (PAGE_SIZE / 2))
-			bufsz = (PAGE_SIZE / 2);
-		/* cap the bufsz at our largest descriptor size */
-		bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
-		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
 		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		srrctl |= ((IXGBE_RX_HDR_SIZE <<
-		            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-		           IXGBE_SRRCTL_BSIZEHDR_MASK);
 	} else {
+		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+		          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-			srrctl |= IXGBE_RXBUFFER_2048 >>
-			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-		else
-			srrctl |= rx_ring->rx_buf_len >>
-			          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	}
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
 }
 
+static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
+{
+	u32 mrqc = 0;
+	int mask;
+
+	if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
+		return mrqc;
+
+	mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+#ifdef CONFIG_IXGBE_DCB
+				 | IXGBE_FLAG_DCB_ENABLED
+#endif
+				);
+
+	switch (mask) {
+	case (IXGBE_FLAG_RSS_ENABLED):
+		mrqc = IXGBE_MRQC_RSSEN;
+		break;
+#ifdef CONFIG_IXGBE_DCB
+	case (IXGBE_FLAG_DCB_ENABLED):
+		mrqc = IXGBE_MRQC_RT8TCEN;
+		break;
+#endif /* CONFIG_IXGBE_DCB */
+	default:
+		break;
+	}
+
+	return mrqc;
+}
+
 /**
  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  * @adapter: board private structure
@@ -1736,11 +1995,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	u32 fctrl, hlreg0;
 	u32 reta = 0, mrqc = 0;
 	u32 rdrxctl;
+	u32 rscctrl;
 	int rx_buf_len;
 
 	/* Decide whether to use packet split mode or not */
 	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+#endif /* IXGBE_FCOE */
+
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
 		rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -1749,11 +2014,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 			u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 			              IXGBE_PSRTYPE_UDPHDR |
 			              IXGBE_PSRTYPE_IPV4HDR |
-			              IXGBE_PSRTYPE_IPV6HDR;
+			              IXGBE_PSRTYPE_IPV6HDR |
+			              IXGBE_PSRTYPE_L2HDR;
 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
 		}
 	} else {
-		if (netdev->mtu <= ETH_DATA_LEN)
+		if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
+		    (netdev->mtu <= ETH_DATA_LEN))
 			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		else
 			rx_buf_len = ALIGN(max_frame, 1024);
@@ -1770,6 +2037,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
 	else
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#endif
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
 	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
@@ -1777,8 +2048,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	/* Setup the HW Rx Head and Tail Descriptor Pointers and
-	 * the Base and Length of the Rx Descriptor Ring */
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rdba = adapter->rx_ring[i].dma;
 		j = adapter->rx_ring[i].reg_idx;
@@ -1791,6 +2064,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		adapter->rx_ring[i].tail = IXGBE_RDT(j);
 		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
+#ifdef IXGBE_FCOE
+		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+			struct ixgbe_ring_feature *f;
+			f = &adapter->ring_feature[RING_F_FCOE];
+			if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+			    (i >= f->mask) && (i < f->mask + f->indices))
+				adapter->rx_ring[i].rx_buf_len =
+					IXGBE_FCOE_JUMBO_FRAME_SIZE;
+		}
+
+#endif /* IXGBE_FCOE */
 		ixgbe_configure_srrctl(adapter, j);
 	}
 
@@ -1811,23 +2095,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	}
 
 	/* Program MRQC for the distribution of queues */
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		int mask = adapter->flags & (
-				IXGBE_FLAG_RSS_ENABLED
-				| IXGBE_FLAG_DCB_ENABLED
-			   );
+	mrqc = ixgbe_setup_mrqc(adapter);
 
-		switch (mask) {
-		case (IXGBE_FLAG_RSS_ENABLED):
-			mrqc = IXGBE_MRQC_RSSEN;
-			break;
-		case (IXGBE_FLAG_DCB_ENABLED):
-			mrqc = IXGBE_MRQC_RT8TCEN;
-			break;
-		default:
-			break;
-		}
-	}
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
 		for (i = 0, j = 0; i < 128; i++, j++) {
@@ -1875,8 +2144,45 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1875 | if (hw->mac.type == ixgbe_mac_82599EB) { | 2144 | if (hw->mac.type == ixgbe_mac_82599EB) { |
1876 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | 2145 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
1877 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; | 2146 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; |
2147 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; | ||
1878 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 2148 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
1879 | } | 2149 | } |
2150 | |||
2151 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { | ||
2152 | /* Enable 82599 HW-RSC */ | ||
2153 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
2154 | j = adapter->rx_ring[i].reg_idx; | ||
2155 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); | ||
2156 | rscctrl |= IXGBE_RSCCTL_RSCEN; | ||
2157 | /* | ||
2158 | * we must limit the number of descriptors so that the | ||
2159 | * total size of max desc * buf_len is not greater | ||
2160 | * than 65535 | ||
2161 | */ | ||
2162 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | ||
2163 | #if (MAX_SKB_FRAGS > 16) | ||
2164 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | ||
2165 | #elif (MAX_SKB_FRAGS > 8) | ||
2166 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | ||
2167 | #elif (MAX_SKB_FRAGS > 4) | ||
2168 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | ||
2169 | #else | ||
2170 | rscctrl |= IXGBE_RSCCTL_MAXDESC_1; | ||
2171 | #endif | ||
2172 | } else { | ||
2173 | if (rx_buf_len < IXGBE_RXBUFFER_4096) | ||
2174 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | ||
2175 | else if (rx_buf_len < IXGBE_RXBUFFER_8192) | ||
2176 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | ||
2177 | else | ||
2178 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | ||
2179 | } | ||
2180 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); | ||
2181 | } | ||
2182 | /* Disable RSC for ACK packets */ | ||
2183 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | ||
2184 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | ||
2185 | } | ||
1880 | } | 2186 | } |
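The MAXDESC ladder in the hunk above is pure arithmetic against the 65535-byte limit its comment states: pick the largest descriptor count whose product with the worst-case buffer size still fits. A minimal standalone sketch of the same rule (helper name hypothetical; assumes the hardware's 16KB maximum rx buffer):

        /* largest cap such that cap * buf_len <= 65535 */
        static unsigned int rsc_max_desc(unsigned int rx_buf_len)
        {
                if (rx_buf_len < 4096)
                        return 16;      /* 16 * 4095 = 65520 */
                else if (rx_buf_len < 8192)
                        return 8;       /*  8 * 8191 = 65528 */
                else
                        return 4;       /*  4 * 16383 = 65532 */
        }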
1881 | 2187 | ||
1882 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 2188 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
@@ -2015,11 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev) | |||
2015 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 2321 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
2016 | 2322 | ||
2017 | /* reprogram secondary unicast list */ | 2323 | /* reprogram secondary unicast list */ |
2018 | addr_count = netdev->uc_count; | 2324 | hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list); |
2019 | if (addr_count) | ||
2020 | addr_list = netdev->uc_list->dmi_addr; | ||
2021 | hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count, | ||
2022 | ixgbe_addr_list_itr); | ||
2023 | 2325 | ||
2024 | /* reprogram multicast list */ | 2326 | /* reprogram multicast list */ |
2025 | addr_count = netdev->mc_count; | 2327 | addr_count = netdev->mc_count; |
@@ -2041,13 +2343,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | |||
2041 | 2343 | ||
2042 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | 2344 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { |
2043 | struct napi_struct *napi; | 2345 | struct napi_struct *napi; |
2044 | q_vector = &adapter->q_vector[q_idx]; | 2346 | q_vector = adapter->q_vector[q_idx]; |
2045 | if (!q_vector->rxr_count) | ||
2046 | continue; | ||
2047 | napi = &q_vector->napi; | 2347 | napi = &q_vector->napi; |
2048 | if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) && | 2348 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
2049 | (q_vector->rxr_count > 1)) | 2349 | if (!q_vector->rxr_count || !q_vector->txr_count) { |
2050 | napi->poll = &ixgbe_clean_rxonly_many; | 2350 | if (q_vector->txr_count == 1) |
2351 | napi->poll = &ixgbe_clean_txonly; | ||
2352 | else if (q_vector->rxr_count == 1) | ||
2353 | napi->poll = &ixgbe_clean_rxonly; | ||
2354 | } | ||
2355 | } | ||
2051 | 2356 | ||
2052 | napi_enable(napi); | 2357 | napi_enable(napi); |
2053 | } | 2358 | } |
@@ -2064,9 +2369,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | |||
2064 | q_vectors = 1; | 2369 | q_vectors = 1; |
2065 | 2370 | ||
2066 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | 2371 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { |
2067 | q_vector = &adapter->q_vector[q_idx]; | 2372 | q_vector = adapter->q_vector[q_idx]; |
2068 | if (!q_vector->rxr_count) | ||
2069 | continue; | ||
2070 | napi_disable(&q_vector->napi); | 2373 | napi_disable(&q_vector->napi); |
2071 | } | 2374 | } |
2072 | } | 2375 | } |
@@ -2124,6 +2427,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
2124 | static void ixgbe_configure(struct ixgbe_adapter *adapter) | 2427 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
2125 | { | 2428 | { |
2126 | struct net_device *netdev = adapter->netdev; | 2429 | struct net_device *netdev = adapter->netdev; |
2430 | struct ixgbe_hw *hw = &adapter->hw; | ||
2127 | int i; | 2431 | int i; |
2128 | 2432 | ||
2129 | ixgbe_set_rx_mode(netdev); | 2433 | ixgbe_set_rx_mode(netdev); |
@@ -2140,6 +2444,20 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
2140 | netif_set_gso_max_size(netdev, 65536); | 2444 | netif_set_gso_max_size(netdev, 65536); |
2141 | #endif | 2445 | #endif |
2142 | 2446 | ||
2447 | #ifdef IXGBE_FCOE | ||
2448 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | ||
2449 | ixgbe_configure_fcoe(adapter); | ||
2450 | |||
2451 | #endif /* IXGBE_FCOE */ | ||
2452 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
2453 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2454 | adapter->tx_ring[i].atr_sample_rate = | ||
2455 | adapter->atr_sample_rate; | ||
2456 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); | ||
2457 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | ||
2458 | ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); | ||
2459 | } | ||
2460 | |||
2143 | ixgbe_configure_tx(adapter); | 2461 | ixgbe_configure_tx(adapter); |
2144 | ixgbe_configure_rx(adapter); | 2462 | ixgbe_configure_rx(adapter); |
2145 | for (i = 0; i < adapter->num_rx_queues; i++) | 2463 | for (i = 0; i < adapter->num_rx_queues; i++) |
@@ -2294,6 +2612,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2294 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | 2612 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
2295 | } | 2613 | } |
2296 | 2614 | ||
2615 | #ifdef IXGBE_FCOE | ||
2616 | /* adjust max frame to be able to do baby jumbo for FCoE */ | ||
2617 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | ||
2618 | (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) | ||
2619 | max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
2620 | |||
2621 | #endif /* IXGBE_FCOE */ | ||
2297 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | 2622 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); |
2298 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | 2623 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { |
2299 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | 2624 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
@@ -2357,6 +2682,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2357 | ixgbe_irq_enable(adapter); | 2682 | ixgbe_irq_enable(adapter); |
2358 | 2683 | ||
2359 | /* | 2684 | /* |
2685 | * If this adapter has a fan, check to see if we had a failure | ||
2686 | * before we enabled the interrupt. | ||
2687 | */ | ||
2688 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | ||
2689 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
2690 | if (esdp & IXGBE_ESDP_SDP1) | ||
2691 | DPRINTK(DRV, CRIT, | ||
2692 | "Fan has stopped, replace the adapter\n"); | ||
2693 | } | ||
2694 | |||
2695 | /* | ||
2360 | * For hot-pluggable SFP+ devices, a new SFP+ module may have | 2696 | * For hot-pluggable SFP+ devices, a new SFP+ module may have |
2361 | * arrived before interrupts were enabled. We need to kick off | 2697 | * arrived before interrupts were enabled. We need to kick off |
2362 | * the SFP+ module setup first, then try to bring up link. | 2698 | * the SFP+ module setup first, then try to bring up link. |
@@ -2378,6 +2714,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2378 | DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); | 2714 | DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); |
2379 | } | 2715 | } |
2380 | 2716 | ||
2717 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2718 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
2719 | &(adapter->tx_ring[i].reinit_state)); | ||
2720 | |||
2381 | /* enable transmits */ | 2721 | /* enable transmits */ |
2382 | netif_tx_start_all_queues(netdev); | 2722 | netif_tx_start_all_queues(netdev); |
2383 | 2723 | ||
@@ -2404,20 +2744,37 @@ int ixgbe_up(struct ixgbe_adapter *adapter) | |||
2404 | /* hardware has been reset, we need to reload some things */ | 2744 | /* hardware has been reset, we need to reload some things */ |
2405 | ixgbe_configure(adapter); | 2745 | ixgbe_configure(adapter); |
2406 | 2746 | ||
2407 | ixgbe_napi_add_all(adapter); | ||
2408 | |||
2409 | return ixgbe_up_complete(adapter); | 2747 | return ixgbe_up_complete(adapter); |
2410 | } | 2748 | } |
2411 | 2749 | ||
2412 | void ixgbe_reset(struct ixgbe_adapter *adapter) | 2750 | void ixgbe_reset(struct ixgbe_adapter *adapter) |
2413 | { | 2751 | { |
2414 | struct ixgbe_hw *hw = &adapter->hw; | 2752 | struct ixgbe_hw *hw = &adapter->hw; |
2415 | if (hw->mac.ops.init_hw(hw)) | 2753 | int err; |
2416 | dev_err(&adapter->pdev->dev, "Hardware Error\n"); | 2754 | |
2755 | err = hw->mac.ops.init_hw(hw); | ||
2756 | switch (err) { | ||
2757 | case 0: | ||
2758 | case IXGBE_ERR_SFP_NOT_PRESENT: | ||
2759 | break; | ||
2760 | case IXGBE_ERR_MASTER_REQUESTS_PENDING: | ||
2761 | dev_err(&adapter->pdev->dev, "master disable timed out\n"); | ||
2762 | break; | ||
2763 | case IXGBE_ERR_EEPROM_VERSION: | ||
2764 | /* We are running on a pre-production device, log a warning */ | ||
2765 | dev_warn(&adapter->pdev->dev, "This device is a pre-production " | ||
2766 | "adapter/LOM. Please be aware there may be issues " | ||
2767 | "associated with your hardware. If you are " | ||
2768 | "experiencing problems please contact your Intel or " | ||
2769 | "hardware representative who provided you with this " | ||
2770 | "hardware.\n"); | ||
2771 | break; | ||
2772 | default: | ||
2773 | dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); | ||
2774 | } | ||
2417 | 2775 | ||
2418 | /* reprogram the RAR[0] in case user changed it. */ | 2776 | /* reprogram the RAR[0] in case user changed it. */ |
2419 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 2777 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
2420 | |||
2421 | } | 2778 | } |
2422 | 2779 | ||
2423 | /** | 2780 | /** |
@@ -2445,8 +2802,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
2445 | rx_buffer_info->dma = 0; | 2802 | rx_buffer_info->dma = 0; |
2446 | } | 2803 | } |
2447 | if (rx_buffer_info->skb) { | 2804 | if (rx_buffer_info->skb) { |
2448 | dev_kfree_skb(rx_buffer_info->skb); | 2805 | struct sk_buff *skb = rx_buffer_info->skb; |
2449 | rx_buffer_info->skb = NULL; | 2806 | rx_buffer_info->skb = NULL; |
2807 | do { | ||
2808 | struct sk_buff *this = skb; | ||
2809 | skb = skb->prev; | ||
2810 | dev_kfree_skb(this); | ||
2811 | } while (skb); | ||
2450 | } | 2812 | } |
2451 | if (!rx_buffer_info->page) | 2813 | if (!rx_buffer_info->page) |
2452 | continue; | 2814 | continue; |
@@ -2560,6 +2922,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
2560 | del_timer_sync(&adapter->watchdog_timer); | 2922 | del_timer_sync(&adapter->watchdog_timer); |
2561 | cancel_work_sync(&adapter->watchdog_task); | 2923 | cancel_work_sync(&adapter->watchdog_task); |
2562 | 2924 | ||
2925 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
2926 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
2927 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
2928 | |||
2563 | /* disable transmits in the hardware now that interrupts are off */ | 2929 | /* disable transmits in the hardware now that interrupts are off */ |
2564 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2930 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2565 | j = adapter->tx_ring[i].reg_idx; | 2931 | j = adapter->tx_ring[i].reg_idx; |
@@ -2575,13 +2941,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
2575 | 2941 | ||
2576 | netif_carrier_off(netdev); | 2942 | netif_carrier_off(netdev); |
2577 | 2943 | ||
2578 | #ifdef CONFIG_IXGBE_DCA | ||
2579 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | ||
2580 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; | ||
2581 | dca_remove_requester(&adapter->pdev->dev); | ||
2582 | } | ||
2583 | |||
2584 | #endif | ||
2585 | if (!pci_channel_offline(adapter->pdev)) | 2944 | if (!pci_channel_offline(adapter->pdev)) |
2586 | ixgbe_reset(adapter); | 2945 | ixgbe_reset(adapter); |
2587 | ixgbe_clean_all_tx_rings(adapter); | 2946 | ixgbe_clean_all_tx_rings(adapter); |
@@ -2589,13 +2948,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
2589 | 2948 | ||
2590 | #ifdef CONFIG_IXGBE_DCA | 2949 | #ifdef CONFIG_IXGBE_DCA |
2591 | /* since we reset the hardware DCA settings were cleared */ | 2950 | /* since we reset the hardware DCA settings were cleared */ |
2592 | if (dca_add_requester(&adapter->pdev->dev) == 0) { | 2951 | ixgbe_setup_dca(adapter); |
2593 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | ||
2594 | /* always use CB2 mode, difference is masked | ||
2595 | * in the CB driver */ | ||
2596 | IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); | ||
2597 | ixgbe_setup_dca(adapter); | ||
2598 | } | ||
2599 | #endif | 2952 | #endif |
2600 | } | 2953 | } |
2601 | 2954 | ||
@@ -2620,7 +2973,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
2620 | } | 2973 | } |
2621 | #endif | 2974 | #endif |
2622 | 2975 | ||
2623 | tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); | 2976 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring); |
2624 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); | 2977 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); |
2625 | 2978 | ||
2626 | if (!tx_clean_complete) | 2979 | if (!tx_clean_complete) |
@@ -2632,7 +2985,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
2632 | if (adapter->itr_setting & 1) | 2985 | if (adapter->itr_setting & 1) |
2633 | ixgbe_set_itr(adapter); | 2986 | ixgbe_set_itr(adapter); |
2634 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2987 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
2635 | ixgbe_irq_enable_queues(adapter); | 2988 | ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); |
2636 | } | 2989 | } |
2637 | return work_done; | 2990 | return work_done; |
2638 | } | 2991 | } |
@@ -2668,17 +3021,15 @@ static void ixgbe_reset_task(struct work_struct *work) | |||
2668 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | 3021 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) |
2669 | { | 3022 | { |
2670 | bool ret = false; | 3023 | bool ret = false; |
3024 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; | ||
2671 | 3025 | ||
2672 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 3026 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
2673 | adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3; | 3027 | return ret; |
2674 | adapter->num_rx_queues = | 3028 | |
2675 | adapter->ring_feature[RING_F_DCB].indices; | 3029 | f->mask = 0x7 << 3; |
2676 | adapter->num_tx_queues = | 3030 | adapter->num_rx_queues = f->indices; |
2677 | adapter->ring_feature[RING_F_DCB].indices; | 3031 | adapter->num_tx_queues = f->indices; |
2678 | ret = true; | 3032 | ret = true; |
2679 | } else { | ||
2680 | ret = false; | ||
2681 | } | ||
2682 | 3033 | ||
2683 | return ret; | 3034 | return ret; |
2684 | } | 3035 | } |
@@ -2695,13 +3046,12 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | |||
2695 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | 3046 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) |
2696 | { | 3047 | { |
2697 | bool ret = false; | 3048 | bool ret = false; |
3049 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; | ||
2698 | 3050 | ||
2699 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 3051 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
2700 | adapter->ring_feature[RING_F_RSS].mask = 0xF; | 3052 | f->mask = 0xF; |
2701 | adapter->num_rx_queues = | 3053 | adapter->num_rx_queues = f->indices; |
2702 | adapter->ring_feature[RING_F_RSS].indices; | 3054 | adapter->num_tx_queues = f->indices; |
2703 | adapter->num_tx_queues = | ||
2704 | adapter->ring_feature[RING_F_RSS].indices; | ||
2705 | ret = true; | 3055 | ret = true; |
2706 | } else { | 3056 | } else { |
2707 | ret = false; | 3057 | ret = false; |
@@ -2710,6 +3060,79 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
2710 | return ret; | 3060 | return ret; |
2711 | } | 3061 | } |
2712 | 3062 | ||
3063 | /** | ||
3064 | * ixgbe_set_fdir_queues - Allocate queues for Flow Director | ||
3065 | * @adapter: board private structure to initialize | ||
3066 | * | ||
3067 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
3068 | * to the original CPU that initiated the Tx session. This runs in addition | ||
3069 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
3070 | * Rx load across CPUs using RSS. | ||
3071 | * | ||
3072 | **/ | ||
3073 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
3074 | { | ||
3075 | bool ret = false; | ||
3076 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
3077 | |||
3078 | f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); | ||
3079 | f_fdir->mask = 0; | ||
3080 | |||
3081 | /* Flow Director must have RSS enabled */ | ||
3082 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | ||
3083 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
3083 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | ||
3084 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | ||
3086 | adapter->num_rx_queues = f_fdir->indices; | ||
3087 | ret = true; | ||
3088 | } else { | ||
3089 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3090 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
3091 | } | ||
3092 | return ret; | ||
3093 | } | ||
3094 | |||
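The check above folds two decisions together: FDIR queue counts are capped at the number of online CPUs, and FDIR stays enabled only when RSS is. A hedged restatement with bare names (not driver fields):

        int indices = min(configured_indices, (int)num_online_cpus());
        bool usable = rss_enabled &&
                      (fdir_hash_capable || fdir_perfect_capable);
        if (usable)
                num_tx_queues = num_rx_queues = indices;
        else
                capable_flags = 0;      /* nothing later programs FDIR */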
3095 | #ifdef IXGBE_FCOE | ||
3096 | /** | ||
3097 | * ixgbe_set_fcoe_queues - Allocate queues for Fibre Channel over Ethernet (FCoE) | ||
3098 | * @adapter: board private structure to initialize | ||
3099 | * | ||
3100 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | ||
3101 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | ||
3102 | * rx queues out of the max number of rx queues; instead, it is used as the | ||
3103 | * index of the first rx queue used by FCoE. | ||
3104 | * | ||
3105 | **/ | ||
3106 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | ||
3107 | { | ||
3108 | bool ret = false; | ||
3109 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
3110 | |||
3111 | f->indices = min((int)num_online_cpus(), f->indices); | ||
3112 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
3113 | #ifdef CONFIG_IXGBE_DCB | ||
3114 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
3115 | DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); | ||
3116 | ixgbe_set_dcb_queues(adapter); | ||
3117 | } | ||
3118 | #endif | ||
3119 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
3120 | DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); | ||
3121 | ixgbe_set_rss_queues(adapter); | ||
3122 | } | ||
3123 | /* adding FCoE rx rings to the end */ | ||
3124 | f->mask = adapter->num_rx_queues; | ||
3125 | adapter->num_rx_queues += f->indices; | ||
3126 | if (adapter->num_tx_queues == 0) | ||
3127 | adapter->num_tx_queues = f->indices; | ||
3128 | |||
3129 | ret = true; | ||
3130 | } | ||
3131 | |||
3132 | return ret; | ||
3133 | } | ||
3134 | |||
3135 | #endif /* IXGBE_FCOE */ | ||
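Because the FCoE rings are appended rather than carved out by a bitmask, a concrete layout may help; with RSS contributing 4 rx queues and 8 FCoE indices (illustrative numbers, not from the source), f->mask lands on 4:

        /* rx ring:  0  1  2  3 | 4  5  6  7  8  9 10 11
         * owner:    ----RSS----|----------FCoE----------
         * f->mask = 4 (first FCoE ring), f->indices = 8  */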
2713 | /* | 3136 | /* |
2714 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | 3137 |
2715 | * @adapter: board private structure to initialize | 3138 | * @adapter: board private structure to initialize |
@@ -2723,11 +3146,19 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
2723 | **/ | 3146 | **/ |
2724 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | 3147 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
2725 | { | 3148 | { |
3149 | #ifdef IXGBE_FCOE | ||
3150 | if (ixgbe_set_fcoe_queues(adapter)) | ||
3151 | goto done; | ||
3152 | |||
3153 | #endif /* IXGBE_FCOE */ | ||
2726 | #ifdef CONFIG_IXGBE_DCB | 3154 | #ifdef CONFIG_IXGBE_DCB |
2727 | if (ixgbe_set_dcb_queues(adapter)) | 3155 | if (ixgbe_set_dcb_queues(adapter)) |
2728 | goto done; | 3156 | goto done; |
2729 | 3157 | ||
2730 | #endif | 3158 | #endif |
3159 | if (ixgbe_set_fdir_queues(adapter)) | ||
3160 | goto done; | ||
3161 | |||
2731 | if (ixgbe_set_rss_queues(adapter)) | 3162 | if (ixgbe_set_rss_queues(adapter)) |
2732 | goto done; | 3163 | goto done; |
2733 | 3164 | ||
@@ -2778,9 +3209,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
2778 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 3209 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
2779 | kfree(adapter->msix_entries); | 3210 | kfree(adapter->msix_entries); |
2780 | adapter->msix_entries = NULL; | 3211 | adapter->msix_entries = NULL; |
2781 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
2782 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
2783 | ixgbe_set_num_queues(adapter); | ||
2784 | } else { | 3212 | } else { |
2785 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | 3213 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ |
2786 | /* | 3214 | /* |
@@ -2902,6 +3330,64 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
2902 | #endif | 3330 | #endif |
2903 | 3331 | ||
2904 | /** | 3332 | /** |
3333 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
3334 | * @adapter: board private structure to initialize | ||
3335 | * | ||
3336 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
3337 | * | ||
3338 | **/ | ||
3339 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
3340 | { | ||
3341 | int i; | ||
3342 | bool ret = false; | ||
3343 | |||
3344 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | ||
3345 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | ||
3346 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | ||
3347 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3348 | adapter->rx_ring[i].reg_idx = i; | ||
3349 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
3350 | adapter->tx_ring[i].reg_idx = i; | ||
3351 | ret = true; | ||
3352 | } | ||
3353 | |||
3354 | return ret; | ||
3355 | } | ||
3356 | |||
3357 | #ifdef IXGBE_FCOE | ||
3358 | /** | ||
3359 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE | ||
3360 | * @adapter: board private structure to initialize | ||
3361 | * | ||
3362 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | ||
3363 | * | ||
3364 | */ | ||
3365 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | ||
3366 | { | ||
3367 | int i, fcoe_i = 0; | ||
3368 | bool ret = false; | ||
3369 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
3370 | |||
3371 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
3372 | #ifdef CONFIG_IXGBE_DCB | ||
3373 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
3374 | ixgbe_cache_ring_dcb(adapter); | ||
3375 | fcoe_i = adapter->rx_ring[0].reg_idx + 1; | ||
3376 | } | ||
3377 | #endif /* CONFIG_IXGBE_DCB */ | ||
3378 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
3379 | ixgbe_cache_ring_rss(adapter); | ||
3380 | fcoe_i = f->mask; | ||
3381 | } | ||
3382 | for (i = 0; i < f->indices; i++, fcoe_i++) | ||
3383 | adapter->rx_ring[f->mask + i].reg_idx = fcoe_i; | ||
3384 | ret = true; | ||
3385 | } | ||
3386 | return ret; | ||
3387 | } | ||
3388 | |||
3389 | #endif /* IXGBE_FCOE */ | ||
3390 | /** | ||
2905 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | 3391 | * ixgbe_cache_ring_register - Descriptor ring to register mapping |
2906 | * @adapter: board private structure to initialize | 3392 | * @adapter: board private structure to initialize |
2907 | * | 3393 | * |
@@ -2918,11 +3404,19 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
2918 | adapter->rx_ring[0].reg_idx = 0; | 3404 | adapter->rx_ring[0].reg_idx = 0; |
2919 | adapter->tx_ring[0].reg_idx = 0; | 3405 | adapter->tx_ring[0].reg_idx = 0; |
2920 | 3406 | ||
3407 | #ifdef IXGBE_FCOE | ||
3408 | if (ixgbe_cache_ring_fcoe(adapter)) | ||
3409 | return; | ||
3410 | |||
3411 | #endif /* IXGBE_FCOE */ | ||
2921 | #ifdef CONFIG_IXGBE_DCB | 3412 | #ifdef CONFIG_IXGBE_DCB |
2922 | if (ixgbe_cache_ring_dcb(adapter)) | 3413 | if (ixgbe_cache_ring_dcb(adapter)) |
2923 | return; | 3414 | return; |
2924 | 3415 | ||
2925 | #endif | 3416 | #endif |
3417 | if (ixgbe_cache_ring_fdir(adapter)) | ||
3418 | return; | ||
3419 | |||
2926 | if (ixgbe_cache_ring_rss(adapter)) | 3420 | if (ixgbe_cache_ring_rss(adapter)) |
2927 | return; | 3421 | return; |
2928 | } | 3422 | } |
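Note the dispatch order here mirrors ixgbe_set_num_queues (FCoE, then DCB, then FDIR, then RSS), so the feature that decided the queue counts is also the one that assigns reg_idx values; the single-queue mapping set just above is the fallback when nothing claims the rings.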
@@ -3004,31 +3498,23 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
3004 | * mean we disable MSI-X capabilities of the adapter. */ | 3498 | * mean we disable MSI-X capabilities of the adapter. */ |
3005 | adapter->msix_entries = kcalloc(v_budget, | 3499 | adapter->msix_entries = kcalloc(v_budget, |
3006 | sizeof(struct msix_entry), GFP_KERNEL); | 3500 | sizeof(struct msix_entry), GFP_KERNEL); |
3007 | if (!adapter->msix_entries) { | 3501 | if (adapter->msix_entries) { |
3008 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 3502 | for (vector = 0; vector < v_budget; vector++) |
3009 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 3503 | adapter->msix_entries[vector].entry = vector; |
3010 | ixgbe_set_num_queues(adapter); | ||
3011 | kfree(adapter->tx_ring); | ||
3012 | kfree(adapter->rx_ring); | ||
3013 | err = ixgbe_alloc_queues(adapter); | ||
3014 | if (err) { | ||
3015 | DPRINTK(PROBE, ERR, "Unable to allocate memory " | ||
3016 | "for queues\n"); | ||
3017 | goto out; | ||
3018 | } | ||
3019 | 3504 | ||
3020 | goto try_msi; | 3505 | ixgbe_acquire_msix_vectors(adapter, v_budget); |
3021 | } | ||
3022 | 3506 | ||
3023 | for (vector = 0; vector < v_budget; vector++) | 3507 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3024 | adapter->msix_entries[vector].entry = vector; | 3508 | goto out; |
3025 | 3509 | } | |
3026 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
3027 | 3510 | ||
3028 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 3511 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
3029 | goto out; | 3512 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
3513 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3514 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
3515 | adapter->atr_sample_rate = 0; | ||
3516 | ixgbe_set_num_queues(adapter); | ||
3030 | 3517 | ||
3031 | try_msi: | ||
3032 | err = pci_enable_msi(adapter->pdev); | 3518 | err = pci_enable_msi(adapter->pdev); |
3033 | if (!err) { | 3519 | if (!err) { |
3034 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | 3520 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; |
@@ -3043,6 +3529,79 @@ out: | |||
3043 | return err; | 3529 | return err; |
3044 | } | 3530 | } |
3045 | 3531 | ||
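The rewritten flow replaces the old try_msi label with straight fall-through: if MSI-X cannot be obtained, every feature that needs multiple vectors is shut off before MSI is attempted. Roughly (helper names hypothetical; pci_enable_msi is the real call):

        if (msix_table && try_msix(adapter) == 0)
                return 0;                     /* one vector per queue */
        drop_dcb_rss_fdir(adapter);           /* multiqueue needs MSI-X */
        adapter->atr_sample_rate = 0;         /* ATR rides on FDIR */
        set_num_queues(adapter);              /* shrink to a single queue */
        if (pci_enable_msi(adapter->pdev) == 0)
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
        /* otherwise fall back to legacy INTx */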
3532 | /** | ||
3533 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
3534 | * @adapter: board private structure to initialize | ||
3535 | * | ||
3536 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
3537 | * return -ENOMEM. | ||
3538 | **/ | ||
3539 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
3540 | { | ||
3541 | int q_idx, num_q_vectors; | ||
3542 | struct ixgbe_q_vector *q_vector; | ||
3543 | int napi_vectors; | ||
3544 | int (*poll)(struct napi_struct *, int); | ||
3545 | |||
3546 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3547 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3548 | napi_vectors = adapter->num_rx_queues; | ||
3549 | poll = &ixgbe_clean_rxtx_many; | ||
3550 | } else { | ||
3551 | num_q_vectors = 1; | ||
3552 | napi_vectors = 1; | ||
3553 | poll = &ixgbe_poll; | ||
3554 | } | ||
3555 | |||
3556 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | ||
3557 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); | ||
3558 | if (!q_vector) | ||
3559 | goto err_out; | ||
3560 | q_vector->adapter = adapter; | ||
3561 | q_vector->eitr = adapter->eitr_param; | ||
3562 | q_vector->v_idx = q_idx; | ||
3563 | netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); | ||
3564 | adapter->q_vector[q_idx] = q_vector; | ||
3565 | } | ||
3566 | |||
3567 | return 0; | ||
3568 | |||
3569 | err_out: | ||
3570 | while (q_idx) { | ||
3571 | q_idx--; | ||
3572 | q_vector = adapter->q_vector[q_idx]; | ||
3573 | netif_napi_del(&q_vector->napi); | ||
3574 | kfree(q_vector); | ||
3575 | adapter->q_vector[q_idx] = NULL; | ||
3576 | } | ||
3577 | return -ENOMEM; | ||
3578 | } | ||
3579 | |||
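The err_out path above is the standard partial-allocation unwind: tear down, in reverse, exactly what succeeded before the failing kzalloc. The same pattern in isolation:

        static int alloc_all(void **vec, int n, size_t size)
        {
                int i;

                for (i = 0; i < n; i++) {
                        vec[i] = kzalloc(size, GFP_KERNEL);
                        if (!vec[i])
                                goto unwind;
                }
                return 0;
        unwind:
                while (i--) {           /* frees only what was allocated */
                        kfree(vec[i]);
                        vec[i] = NULL;
                }
                return -ENOMEM;
        }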
3580 | /** | ||
3581 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
3582 | * @adapter: board private structure to initialize | ||
3583 | * | ||
3584 | * This function frees the memory allocated to the q_vectors. In addition, if | ||
3585 | * NAPI is enabled it deletes any references to the NAPI struct prior | ||
3586 | * to freeing the q_vector. | ||
3587 | **/ | ||
3588 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
3589 | { | ||
3590 | int q_idx, num_q_vectors; | ||
3591 | |||
3592 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
3593 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3594 | else | ||
3595 | num_q_vectors = 1; | ||
3596 | |||
3597 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | ||
3598 | struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; | ||
3599 | adapter->q_vector[q_idx] = NULL; | ||
3600 | netif_napi_del(&q_vector->napi); | ||
3601 | kfree(q_vector); | ||
3602 | } | ||
3603 | } | ||
3604 | |||
3046 | void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | 3605 | void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) |
3047 | { | 3606 | { |
3048 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 3607 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
@@ -3074,18 +3633,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
3074 | /* Number of supported queues */ | 3633 | /* Number of supported queues */ |
3075 | ixgbe_set_num_queues(adapter); | 3634 | ixgbe_set_num_queues(adapter); |
3076 | 3635 | ||
3077 | err = ixgbe_alloc_queues(adapter); | ||
3078 | if (err) { | ||
3079 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | ||
3080 | goto err_alloc_queues; | ||
3081 | } | ||
3082 | |||
3083 | err = ixgbe_set_interrupt_capability(adapter); | 3636 | err = ixgbe_set_interrupt_capability(adapter); |
3084 | if (err) { | 3637 | if (err) { |
3085 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); | 3638 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); |
3086 | goto err_set_interrupt; | 3639 | goto err_set_interrupt; |
3087 | } | 3640 | } |
3088 | 3641 | ||
3642 | err = ixgbe_alloc_q_vectors(adapter); | ||
3643 | if (err) { | ||
3644 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " | ||
3645 | "vectors\n"); | ||
3646 | goto err_alloc_q_vectors; | ||
3647 | } | ||
3648 | |||
3649 | err = ixgbe_alloc_queues(adapter); | ||
3650 | if (err) { | ||
3651 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | ||
3652 | goto err_alloc_queues; | ||
3653 | } | ||
3654 | |||
3089 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " | 3655 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " |
3090 | "Tx Queue count = %u\n", | 3656 | "Tx Queue count = %u\n", |
3091 | (adapter->num_rx_queues > 1) ? "Enabled" : | 3657 | (adapter->num_rx_queues > 1) ? "Enabled" : |
@@ -3095,11 +3661,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
3095 | 3661 | ||
3096 | return 0; | 3662 | return 0; |
3097 | 3663 | ||
3664 | err_alloc_queues: | ||
3665 | ixgbe_free_q_vectors(adapter); | ||
3666 | err_alloc_q_vectors: | ||
3667 | ixgbe_reset_interrupt_capability(adapter); | ||
3098 | err_set_interrupt: | 3668 | err_set_interrupt: |
3669 | return err; | ||
3670 | } | ||
3671 | |||
3672 | /** | ||
3673 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
3674 | * @adapter: board private structure to clear interrupt scheme on | ||
3675 | * | ||
3676 | * We go through and clear interrupt-specific resources and reset the structure | ||
3677 | * to pre-load conditions. | ||
3678 | **/ | ||
3679 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
3680 | { | ||
3099 | kfree(adapter->tx_ring); | 3681 | kfree(adapter->tx_ring); |
3100 | kfree(adapter->rx_ring); | 3682 | kfree(adapter->rx_ring); |
3101 | err_alloc_queues: | 3683 | adapter->tx_ring = NULL; |
3102 | return err; | 3684 | adapter->rx_ring = NULL; |
3685 | |||
3686 | ixgbe_free_q_vectors(adapter); | ||
3687 | ixgbe_reset_interrupt_capability(adapter); | ||
3103 | } | 3688 | } |
3104 | 3689 | ||
3105 | /** | 3690 | /** |
@@ -3185,10 +3770,24 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3185 | adapter->ring_feature[RING_F_RSS].indices = rss; | 3770 | adapter->ring_feature[RING_F_RSS].indices = rss; |
3186 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 3771 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
3187 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 3772 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
3188 | if (hw->mac.type == ixgbe_mac_82598EB) | 3773 | if (hw->mac.type == ixgbe_mac_82598EB) { |
3774 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | ||
3775 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | ||
3189 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 3776 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
3190 | else if (hw->mac.type == ixgbe_mac_82599EB) | 3777 | } else if (hw->mac.type == ixgbe_mac_82599EB) { |
3191 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 3778 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3779 | adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; | ||
3780 | adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; | ||
3781 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3782 | adapter->ring_feature[RING_F_FDIR].indices = | ||
3783 | IXGBE_MAX_FDIR_INDICES; | ||
3784 | adapter->atr_sample_rate = 20; | ||
3785 | adapter->fdir_pballoc = 0; | ||
3786 | #ifdef IXGBE_FCOE | ||
3787 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; | ||
3788 | adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; | ||
3789 | #endif /* IXGBE_FCOE */ | ||
3790 | } | ||
3192 | 3791 | ||
3193 | #ifdef CONFIG_IXGBE_DCB | 3792 | #ifdef CONFIG_IXGBE_DCB |
3194 | /* Configure DCB traffic classes */ | 3793 | /* Configure DCB traffic classes */ |
@@ -3203,6 +3802,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3203 | adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; | 3802 | adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; |
3204 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; | 3803 | adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; |
3205 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; | 3804 | adapter->dcb_cfg.rx_pba_cfg = pba_equal; |
3805 | adapter->dcb_cfg.pfc_mode_enable = false; | ||
3206 | adapter->dcb_cfg.round_robin_enable = false; | 3806 | adapter->dcb_cfg.round_robin_enable = false; |
3207 | adapter->dcb_set_bitmap = 0x00; | 3807 | adapter->dcb_set_bitmap = 0x00; |
3208 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, | 3808 | ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, |
@@ -3213,6 +3813,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3213 | /* default flow control settings */ | 3813 | /* default flow control settings */ |
3214 | hw->fc.requested_mode = ixgbe_fc_full; | 3814 | hw->fc.requested_mode = ixgbe_fc_full; |
3215 | hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ | 3815 | hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ |
3816 | #ifdef CONFIG_DCB | ||
3817 | adapter->last_lfc_mode = hw->fc.current_mode; | ||
3818 | #endif | ||
3216 | hw->fc.high_water = IXGBE_DEFAULT_FCRTH; | 3819 | hw->fc.high_water = IXGBE_DEFAULT_FCRTH; |
3217 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; | 3820 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; |
3218 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; | 3821 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; |
@@ -3503,6 +4106,8 @@ static int ixgbe_open(struct net_device *netdev) | |||
3503 | if (test_bit(__IXGBE_TESTING, &adapter->state)) | 4106 | if (test_bit(__IXGBE_TESTING, &adapter->state)) |
3504 | return -EBUSY; | 4107 | return -EBUSY; |
3505 | 4108 | ||
4109 | netif_carrier_off(netdev); | ||
4110 | |||
3506 | /* allocate transmit descriptors */ | 4111 | /* allocate transmit descriptors */ |
3507 | err = ixgbe_setup_all_tx_resources(adapter); | 4112 | err = ixgbe_setup_all_tx_resources(adapter); |
3508 | if (err) | 4113 | if (err) |
@@ -3515,8 +4120,6 @@ static int ixgbe_open(struct net_device *netdev) | |||
3515 | 4120 | ||
3516 | ixgbe_configure(adapter); | 4121 | ixgbe_configure(adapter); |
3517 | 4122 | ||
3518 | ixgbe_napi_add_all(adapter); | ||
3519 | |||
3520 | err = ixgbe_request_irq(adapter); | 4123 | err = ixgbe_request_irq(adapter); |
3521 | if (err) | 4124 | if (err) |
3522 | goto err_req_irq; | 4125 | goto err_req_irq; |
@@ -3568,55 +4171,6 @@ static int ixgbe_close(struct net_device *netdev) | |||
3568 | return 0; | 4171 | return 0; |
3569 | } | 4172 | } |
3570 | 4173 | ||
3571 | /** | ||
3572 | * ixgbe_napi_add_all - prep napi structs for use | ||
3573 | * @adapter: private struct | ||
3574 | * | ||
3575 | * helper function to napi_add each possible q_vector->napi | ||
3576 | */ | ||
3577 | void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | ||
3578 | { | ||
3579 | int q_idx, q_vectors; | ||
3580 | struct net_device *netdev = adapter->netdev; | ||
3581 | int (*poll)(struct napi_struct *, int); | ||
3582 | |||
3583 | /* check if we already have our netdev->napi_list populated */ | ||
3584 | if (&netdev->napi_list != netdev->napi_list.next) | ||
3585 | return; | ||
3586 | |||
3587 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3588 | poll = &ixgbe_clean_rxonly; | ||
3589 | /* Only enable as many vectors as we have rx queues. */ | ||
3590 | q_vectors = adapter->num_rx_queues; | ||
3591 | } else { | ||
3592 | poll = &ixgbe_poll; | ||
3593 | /* only one q_vector for legacy modes */ | ||
3594 | q_vectors = 1; | ||
3595 | } | ||
3596 | |||
3597 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
3598 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; | ||
3599 | netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); | ||
3600 | } | ||
3601 | } | ||
3602 | |||
3603 | void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) | ||
3604 | { | ||
3605 | int q_idx; | ||
3606 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3607 | |||
3608 | /* legacy and MSI only use one vector */ | ||
3609 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
3610 | q_vectors = 1; | ||
3611 | |||
3612 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
3613 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; | ||
3614 | if (!q_vector->rxr_count) | ||
3615 | continue; | ||
3616 | netif_napi_del(&q_vector->napi); | ||
3617 | } | ||
3618 | } | ||
3619 | |||
3620 | #ifdef CONFIG_PM | 4174 | #ifdef CONFIG_PM |
3621 | static int ixgbe_resume(struct pci_dev *pdev) | 4175 | static int ixgbe_resume(struct pci_dev *pdev) |
3622 | { | 4176 | { |
@@ -3626,7 +4180,8 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
3626 | 4180 | ||
3627 | pci_set_power_state(pdev, PCI_D0); | 4181 | pci_set_power_state(pdev, PCI_D0); |
3628 | pci_restore_state(pdev); | 4182 | pci_restore_state(pdev); |
3629 | err = pci_enable_device(pdev); | 4183 | |
4184 | err = pci_enable_device_mem(pdev); | ||
3630 | if (err) { | 4185 | if (err) { |
3631 | printk(KERN_ERR "ixgbe: Cannot enable PCI device from " | 4186 | printk(KERN_ERR "ixgbe: Cannot enable PCI device from " |
3632 | "suspend\n"); | 4187 | "suspend\n"); |
@@ -3634,8 +4189,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
3634 | } | 4189 | } |
3635 | pci_set_master(pdev); | 4190 | pci_set_master(pdev); |
3636 | 4191 | ||
3637 | pci_enable_wake(pdev, PCI_D3hot, 0); | 4192 | pci_wake_from_d3(pdev, false); |
3638 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
3639 | 4193 | ||
3640 | err = ixgbe_init_interrupt_scheme(adapter); | 4194 | err = ixgbe_init_interrupt_scheme(adapter); |
3641 | if (err) { | 4195 | if (err) { |
@@ -3679,11 +4233,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
3679 | ixgbe_free_all_tx_resources(adapter); | 4233 | ixgbe_free_all_tx_resources(adapter); |
3680 | ixgbe_free_all_rx_resources(adapter); | 4234 | ixgbe_free_all_rx_resources(adapter); |
3681 | } | 4235 | } |
3682 | ixgbe_reset_interrupt_capability(adapter); | 4236 | ixgbe_clear_interrupt_scheme(adapter); |
3683 | ixgbe_napi_del_all(adapter); | ||
3684 | INIT_LIST_HEAD(&netdev->napi_list); | ||
3685 | kfree(adapter->tx_ring); | ||
3686 | kfree(adapter->rx_ring); | ||
3687 | 4237 | ||
3688 | #ifdef CONFIG_PM | 4238 | #ifdef CONFIG_PM |
3689 | retval = pci_save_state(pdev); | 4239 | retval = pci_save_state(pdev); |
@@ -3711,13 +4261,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
3711 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); | 4261 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); |
3712 | } | 4262 | } |
3713 | 4263 | ||
3714 | if (wufc && hw->mac.type == ixgbe_mac_82599EB) { | 4264 | if (wufc && hw->mac.type == ixgbe_mac_82599EB) |
3715 | pci_enable_wake(pdev, PCI_D3hot, 1); | 4265 | pci_wake_from_d3(pdev, true); |
3716 | pci_enable_wake(pdev, PCI_D3cold, 1); | 4266 | else |
3717 | } else { | 4267 | pci_wake_from_d3(pdev, false); |
3718 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
3719 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
3720 | } | ||
3721 | 4268 | ||
3722 | *enable_wake = !!wufc; | 4269 | *enable_wake = !!wufc; |
3723 | 4270 | ||
@@ -3772,9 +4319,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
3772 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; | 4319 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; |
3773 | 4320 | ||
3774 | if (hw->mac.type == ixgbe_mac_82599EB) { | 4321 | if (hw->mac.type == ixgbe_mac_82599EB) { |
4322 | u64 rsc_count = 0; | ||
3775 | for (i = 0; i < 16; i++) | 4323 | for (i = 0; i < 16; i++) |
3776 | adapter->hw_rx_no_dma_resources += | 4324 | adapter->hw_rx_no_dma_resources += |
3777 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 4325 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
4326 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4327 | rsc_count += adapter->rx_ring[i].rsc_count; | ||
4328 | adapter->rsc_count = rsc_count; | ||
3778 | } | 4329 | } |
3779 | 4330 | ||
3780 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | 4331 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
@@ -3821,6 +4372,16 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
3821 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ | 4372 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
3822 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | 4373 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
3823 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | 4374 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
4375 | adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | ||
4376 | adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | ||
4377 | #ifdef IXGBE_FCOE | ||
4378 | adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); | ||
4379 | adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | ||
4380 | adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | ||
4381 | adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | ||
4382 | adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | ||
4383 | adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | ||
4384 | #endif /* IXGBE_FCOE */ | ||
3824 | } else { | 4385 | } else { |
3825 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | 4386 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); |
3826 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | 4387 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); |
@@ -3888,64 +4449,43 @@ static void ixgbe_watchdog(unsigned long data) | |||
3888 | { | 4449 | { |
3889 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; | 4450 | struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; |
3890 | struct ixgbe_hw *hw = &adapter->hw; | 4451 | struct ixgbe_hw *hw = &adapter->hw; |
4452 | u64 eics = 0; | ||
4453 | int i; | ||
3891 | 4454 | ||
3892 | /* Do the watchdog outside of interrupt context due to the lovely | 4455 | /* |
3893 | * delays that some of the newer hardware requires */ | 4456 | * Do the watchdog outside of interrupt context due to the lovely |
3894 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { | 4457 | * delays that some of the newer hardware requires |
3895 | u64 eics = 0; | 4458 | */ |
3896 | int i; | ||
3897 | 4459 | ||
3898 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) | 4460 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
3899 | eics |= (1 << i); | 4461 | goto watchdog_short_circuit; |
3900 | 4462 | ||
3901 | /* Cause software interrupt to ensure rx rings are cleaned */ | 4463 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
3902 | switch (hw->mac.type) { | 4464 | /* |
3903 | case ixgbe_mac_82598EB: | 4465 | * for legacy and MSI interrupts don't set any bits |
3904 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 4466 | * that are enabled for EIAM, because this operation |
3905 | IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics); | 4467 | * would set *both* EIMS and EICS for any bit in EIAM |
3906 | } else { | 4468 | */ |
3907 | /* | 4469 | IXGBE_WRITE_REG(hw, IXGBE_EICS, |
3908 | * for legacy and MSI interrupts don't set any | 4470 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); |
3909 | * bits that are enabled for EIAM, because this | 4471 | goto watchdog_reschedule; |
3910 | * operation would set *both* EIMS and EICS for | ||
3911 | * any bit in EIAM | ||
3912 | */ | ||
3913 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | ||
3914 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | ||
3915 | } | ||
3916 | break; | ||
3917 | case ixgbe_mac_82599EB: | ||
3918 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3919 | /* | ||
3920 | * EICS(0..15) first 0-15 q vectors | ||
3921 | * EICS[1] (16..31) q vectors 16-31 | ||
3922 | * EICS[2] (0..31) q vectors 32-63 | ||
3923 | */ | ||
3924 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | ||
3925 | (u32)(eics & 0xFFFF)); | ||
3926 | IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), | ||
3927 | (u32)(eics & 0xFFFF0000)); | ||
3928 | IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2), | ||
3929 | (u32)(eics >> 32)); | ||
3930 | } else { | ||
3931 | /* | ||
3932 | * for legacy and MSI interrupts don't set any | ||
3933 | * bits that are enabled for EIAM, because this | ||
3934 | * operation would set *both* EIMS and EICS for | ||
3935 | * any bit in EIAM | ||
3936 | */ | ||
3937 | IXGBE_WRITE_REG(hw, IXGBE_EICS, | ||
3938 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | ||
3939 | } | ||
3940 | break; | ||
3941 | default: | ||
3942 | break; | ||
3943 | } | ||
3944 | /* Reset the timer */ | ||
3945 | mod_timer(&adapter->watchdog_timer, | ||
3946 | round_jiffies(jiffies + 2 * HZ)); | ||
3947 | } | 4472 | } |
3948 | 4473 | ||
4474 | /* get one bit for every active tx/rx interrupt vector */ | ||
4475 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | ||
4476 | struct ixgbe_q_vector *qv = adapter->q_vector[i]; | ||
4477 | if (qv->rxr_count || qv->txr_count) | ||
4478 | eics |= ((u64)1 << i); | ||
4479 | } | ||
4480 | |||
4481 | /* Cause software interrupt to ensure rx rings are cleaned */ | ||
4482 | ixgbe_irq_rearm_queues(adapter, eics); | ||
4483 | |||
4484 | watchdog_reschedule: | ||
4485 | /* Reset the timer */ | ||
4486 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | ||
4487 | |||
4488 | watchdog_short_circuit: | ||
3949 | schedule_work(&adapter->watchdog_task); | 4489 | schedule_work(&adapter->watchdog_task); |
3950 | } | 4490 | } |
3951 | 4491 | ||
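The vector scan above sets one eics bit per q_vector that owns at least one tx or rx ring, so vectors dedicated to link and other causes are never kicked; with four fully loaded vectors the mask is simply 0xf. The selection in isolation:

        u64 eics = 0;
        int i;

        for (i = 0; i < num_q_vectors; i++)
                if (qv[i]->rxr_count || qv[i]->txr_count)
                        eics |= 1ULL << i;  /* one EICS bit per busy vector */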
@@ -3999,6 +4539,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
3999 | } | 4539 | } |
4000 | 4540 | ||
4001 | /** | 4541 | /** |
4542 | * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table | ||
4543 | * @work: pointer to work_struct containing our data | ||
4544 | **/ | ||
4545 | static void ixgbe_fdir_reinit_task(struct work_struct *work) | ||
4546 | { | ||
4547 | struct ixgbe_adapter *adapter = container_of(work, | ||
4548 | struct ixgbe_adapter, | ||
4549 | fdir_reinit_task); | ||
4550 | struct ixgbe_hw *hw = &adapter->hw; | ||
4551 | int i; | ||
4552 | |||
4553 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | ||
4554 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4555 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
4556 | &(adapter->tx_ring[i].reinit_state)); | ||
4557 | } else { | ||
4558 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " | ||
4559 | "ignored adding FDIR ATR filters \n"); | ||
4560 | } | ||
4561 | /* Done FDIR Re-initialization, enable transmits */ | ||
4562 | netif_tx_start_all_queues(adapter->netdev); | ||
4563 | } | ||
4564 | |||
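This worker is the other half of the __IXGBE_FDIR_INIT_DONE handshake armed in ixgbe_up_complete: the bit is re-set per ring only once the hardware table has been rebuilt, and the transmit path presumably gates ATR sampling on it, along the lines of:

        if (test_bit(__IXGBE_FDIR_INIT_DONE, &tx_ring->reinit_state))
                ixgbe_atr(adapter, skb, queue, tx_flags);  /* safe to sample */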
4565 | /** | ||
4002 | * ixgbe_watchdog_task - worker thread to bring link up | 4566 | * ixgbe_watchdog_task - worker thread to bring link up |
4003 | * @work: pointer to work_struct containing our data | 4567 | * @work: pointer to work_struct containing our data |
4004 | **/ | 4568 | **/ |
@@ -4011,16 +4575,32 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4011 | struct ixgbe_hw *hw = &adapter->hw; | 4575 | struct ixgbe_hw *hw = &adapter->hw; |
4012 | u32 link_speed = adapter->link_speed; | 4576 | u32 link_speed = adapter->link_speed; |
4013 | bool link_up = adapter->link_up; | 4577 | bool link_up = adapter->link_up; |
4578 | int i; | ||
4579 | struct ixgbe_ring *tx_ring; | ||
4580 | int some_tx_pending = 0; | ||
4014 | 4581 | ||
4015 | adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; | 4582 | adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; |
4016 | 4583 | ||
4017 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | 4584 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { |
4018 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | 4585 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
4586 | if (link_up) { | ||
4587 | #ifdef CONFIG_DCB | ||
4588 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | ||
4589 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) | ||
4590 | hw->mac.ops.fc_enable(hw, i); | ||
4591 | } else { | ||
4592 | hw->mac.ops.fc_enable(hw, 0); | ||
4593 | } | ||
4594 | #else | ||
4595 | hw->mac.ops.fc_enable(hw, 0); | ||
4596 | #endif | ||
4597 | } | ||
4598 | |||
4019 | if (link_up || | 4599 | if (link_up || |
4020 | time_after(jiffies, (adapter->link_check_timeout + | 4600 | time_after(jiffies, (adapter->link_check_timeout + |
4021 | IXGBE_TRY_LINK_TIMEOUT))) { | 4601 | IXGBE_TRY_LINK_TIMEOUT))) { |
4022 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
4023 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; | 4602 | adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; |
4603 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); | ||
4024 | } | 4604 | } |
4025 | adapter->link_up = link_up; | 4605 | adapter->link_up = link_up; |
4026 | adapter->link_speed = link_speed; | 4606 | adapter->link_speed = link_speed; |
@@ -4068,6 +4648,25 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4068 | } | 4648 | } |
4069 | } | 4649 | } |
4070 | 4650 | ||
4651 | if (!netif_carrier_ok(netdev)) { | ||
4652 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
4653 | tx_ring = &adapter->tx_ring[i]; | ||
4654 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { | ||
4655 | some_tx_pending = 1; | ||
4656 | break; | ||
4657 | } | ||
4658 | } | ||
4659 | |||
4660 | if (some_tx_pending) { | ||
4661 | /* We've lost link, so the controller stops DMA, | ||
4662 | * but we've got queued Tx work that's never going | ||
4663 | * to get done, so reset controller to flush Tx. | ||
4664 | * (Do the reset outside of interrupt context). | ||
4665 | */ | ||
4666 | schedule_work(&adapter->reset_task); | ||
4667 | } | ||
4668 | } | ||
4669 | |||
4071 | ixgbe_update_stats(adapter); | 4670 | ixgbe_update_stats(adapter); |
4072 | adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; | 4671 | adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; |
4073 | } | 4672 | } |
@@ -4196,12 +4795,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
4196 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 4795 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
4197 | type_tucmd_mlhl |= | 4796 | type_tucmd_mlhl |= |
4198 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | 4797 | IXGBE_ADVTXD_TUCMD_L4T_TCP; |
4798 | else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) | ||
4799 | type_tucmd_mlhl |= | ||
4800 | IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
4199 | break; | 4801 | break; |
4200 | case cpu_to_be16(ETH_P_IPV6): | 4802 | case cpu_to_be16(ETH_P_IPV6): |
4201 | /* XXX what about other V6 headers?? */ | 4803 | /* XXX what about other V6 headers?? */ |
4202 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | 4804 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
4203 | type_tucmd_mlhl |= | 4805 | type_tucmd_mlhl |= |
4204 | IXGBE_ADVTXD_TUCMD_L4T_TCP; | 4806 | IXGBE_ADVTXD_TUCMD_L4T_TCP; |
4807 | else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) | ||
4808 | type_tucmd_mlhl |= | ||
4809 | IXGBE_ADVTXD_TUCMD_L4T_SCTP; | ||
4205 | break; | 4810 | break; |
4206 | default: | 4811 | default: |
4207 | if (unlikely(net_ratelimit())) { | 4812 | if (unlikely(net_ratelimit())) { |
@@ -4234,10 +4839,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
4234 | 4839 | ||
4235 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | 4840 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, |
4236 | struct ixgbe_ring *tx_ring, | 4841 | struct ixgbe_ring *tx_ring, |
4237 | struct sk_buff *skb, unsigned int first) | 4842 | struct sk_buff *skb, u32 tx_flags, |
4843 | unsigned int first) | ||
4238 | { | 4844 | { |
4239 | struct ixgbe_tx_buffer *tx_buffer_info; | 4845 | struct ixgbe_tx_buffer *tx_buffer_info; |
4240 | unsigned int len = skb_headlen(skb); | 4846 | unsigned int len; |
4847 | unsigned int total = skb->len; | ||
4241 | unsigned int offset = 0, size, count = 0, i; | 4848 | unsigned int offset = 0, size, count = 0, i; |
4242 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 4849 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
4243 | unsigned int f; | 4850 | unsigned int f; |
@@ -4252,16 +4859,22 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
4252 | 4859 | ||
4253 | map = skb_shinfo(skb)->dma_maps; | 4860 | map = skb_shinfo(skb)->dma_maps; |
4254 | 4861 | ||
4862 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) | ||
4863 | /* excluding fcoe_crc_eof for FCoE */ | ||
4864 | total -= sizeof(struct fcoe_crc_eof); | ||
4865 | |||
4866 | len = min(skb_headlen(skb), total); | ||
4255 | while (len) { | 4867 | while (len) { |
4256 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 4868 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
4257 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | 4869 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); |
4258 | 4870 | ||
4259 | tx_buffer_info->length = size; | 4871 | tx_buffer_info->length = size; |
4260 | tx_buffer_info->dma = map[0] + offset; | 4872 | tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset; |
4261 | tx_buffer_info->time_stamp = jiffies; | 4873 | tx_buffer_info->time_stamp = jiffies; |
4262 | tx_buffer_info->next_to_watch = i; | 4874 | tx_buffer_info->next_to_watch = i; |
4263 | 4875 | ||
4264 | len -= size; | 4876 | len -= size; |
4877 | total -= size; | ||
4265 | offset += size; | 4878 | offset += size; |
4266 | count++; | 4879 | count++; |
4267 | 4880 | ||
@@ -4276,7 +4889,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
4276 | struct skb_frag_struct *frag; | 4889 | struct skb_frag_struct *frag; |
4277 | 4890 | ||
4278 | frag = &skb_shinfo(skb)->frags[f]; | 4891 | frag = &skb_shinfo(skb)->frags[f]; |
4279 | len = frag->size; | 4892 | len = min((unsigned int)frag->size, total); |
4280 | offset = 0; | 4893 | offset = 0; |
4281 | 4894 | ||
4282 | while (len) { | 4895 | while (len) { |
@@ -4288,14 +4901,17 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
4288 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | 4901 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); |
4289 | 4902 | ||
4290 | tx_buffer_info->length = size; | 4903 | tx_buffer_info->length = size; |
4291 | tx_buffer_info->dma = map[f + 1] + offset; | 4904 | tx_buffer_info->dma = map[f] + offset; |
4292 | tx_buffer_info->time_stamp = jiffies; | 4905 | tx_buffer_info->time_stamp = jiffies; |
4293 | tx_buffer_info->next_to_watch = i; | 4906 | tx_buffer_info->next_to_watch = i; |
4294 | 4907 | ||
4295 | len -= size; | 4908 | len -= size; |
4909 | total -= size; | ||
4296 | offset += size; | 4910 | offset += size; |
4297 | count++; | 4911 | count++; |
4298 | } | 4912 | } |
4913 | if (total == 0) | ||
4914 | break; | ||
4299 | } | 4915 | } |
4300 | 4916 | ||
4301 | tx_ring->tx_buffer_info[i].skb = skb; | 4917 | tx_ring->tx_buffer_info[i].skb = skb; |
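
The reworked mapping loop above bounds every chunk twice: by the per-descriptor hardware maximum and by a running `total` budget, which for FCoE frames excludes the trailing CRC/EOF so the trailer is never mapped as payload. A standalone model of the capped loop (MAX_DATA_PER_TXD is a stand-in value, not the driver's constant):

    #include <stdio.h>

    #define MAX_DATA_PER_TXD 16384u /* stand-in for IXGBE_MAX_DATA_PER_TXD */

    static unsigned int map_linear(unsigned int head_len, unsigned int total)
    {
            unsigned int len = head_len < total ? head_len : total;
            unsigned int count = 0;

            while (len) {
                    unsigned int size =
                            len < MAX_DATA_PER_TXD ? len : MAX_DATA_PER_TXD;

                    len -= size;
                    total -= size;  /* budget shrinks per descriptor */
                    count++;
            }
            return count;
    }

    int main(void)
    {
            /* e.g. 2 KiB headlen against a 40000-byte budget -> 1 desc */
            printf("%u descriptors\n", map_linear(2048, 40000));
            return 0;
    }
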
@@ -4337,6 +4953,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
4337 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << | 4953 | olinfo_status |= IXGBE_TXD_POPTS_TXSM << |
4338 | IXGBE_ADVTXD_POPTS_SHIFT; | 4954 | IXGBE_ADVTXD_POPTS_SHIFT; |
4339 | 4955 | ||
4956 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { | ||
4957 | olinfo_status |= IXGBE_ADVTXD_CC; | ||
4958 | olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); | ||
4959 | if (tx_flags & IXGBE_TX_FLAGS_FSO) | ||
4960 | cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; | ||
4961 | } | ||
4962 | |||
4340 | olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); | 4963 | olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); |
4341 | 4964 | ||
4342 | i = tx_ring->next_to_use; | 4965 | i = tx_ring->next_to_use; |
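
For FCoE frames the hunk above marks the descriptor as context-checked (CC), points it at context index 1, and, for large sends, turns on TSE so the hardware segments the FC sequence. A small sketch of that bit composition; all three constants are illustrative placeholders for the IXGBE_ADVTXD_* bits:

    #include <stdbool.h>
    #include <stdint.h>

    #define ADVTXD_CC        (1u << 7)      /* placeholder: check context */
    #define ADVTXD_IDX_SHIFT 4              /* placeholder: context index */
    #define ADVTXD_DCMD_TSE  (1u << 31)     /* placeholder: segmentation */

    static void fcoe_desc_bits(bool fso, uint32_t *olinfo, uint32_t *cmd)
    {
            *olinfo |= ADVTXD_CC;                   /* validate vs context */
            *olinfo |= 1u << ADVTXD_IDX_SHIFT;      /* FCoE: context idx 1 */
            if (fso)
                    *cmd |= ADVTXD_DCMD_TSE;        /* large send for FC */
    }
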
@@ -4366,6 +4989,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
4366 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 4989 | writel(i, adapter->hw.hw_addr + tx_ring->tail); |
4367 | } | 4990 | } |
4368 | 4991 | ||
4992 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | ||
4993 | int queue, u32 tx_flags) | ||
4994 | { | ||
4995 | /* Right now, we support IPv4 only */ | ||
4996 | struct ixgbe_atr_input atr_input; | ||
4997 | struct tcphdr *th; | ||
4998 | struct udphdr *uh; | ||
4999 | struct iphdr *iph = ip_hdr(skb); | ||
5000 | struct ethhdr *eth = (struct ethhdr *)skb->data; | ||
5001 | u16 vlan_id, src_port, dst_port, flex_bytes; | ||
5002 | u32 src_ipv4_addr, dst_ipv4_addr; | ||
5003 | u8 l4type = 0; | ||
5004 | |||
5005 | /* check if we're UDP or TCP */ | ||
5006 | if (iph->protocol == IPPROTO_TCP) { | ||
5007 | th = tcp_hdr(skb); | ||
5008 | src_port = th->source; | ||
5009 | dst_port = th->dest; | ||
5010 | l4type |= IXGBE_ATR_L4TYPE_TCP; | ||
5011 | /* the IPv4 l4type encoding is 0, nothing to set */ | ||
5012 | } else if (iph->protocol == IPPROTO_UDP) { | ||
5013 | uh = udp_hdr(skb); | ||
5014 | src_port = uh->source; | ||
5015 | dst_port = uh->dest; | ||
5016 | l4type |= IXGBE_ATR_L4TYPE_UDP; | ||
5017 | /* the IPv4 l4type encoding is 0, nothing to set */ | ||
5018 | } else { | ||
5019 | /* Unsupported L4 header, just bail here */ | ||
5020 | return; | ||
5021 | } | ||
5022 | |||
5023 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | ||
5024 | |||
5025 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | ||
5026 | IXGBE_TX_FLAGS_VLAN_SHIFT; | ||
5027 | src_ipv4_addr = iph->saddr; | ||
5028 | dst_ipv4_addr = iph->daddr; | ||
5029 | flex_bytes = eth->h_proto; | ||
5030 | |||
5031 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | ||
5032 | ixgbe_atr_set_src_port_82599(&atr_input, dst_port); | ||
5033 | ixgbe_atr_set_dst_port_82599(&atr_input, src_port); | ||
5034 | ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); | ||
5035 | ixgbe_atr_set_l4type_82599(&atr_input, l4type); | ||
5036 | /* src and dst are inverted: program the tuple as the receiver sees it */ | ||
5037 | ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); | ||
5038 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); | ||
5039 | |||
5040 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | ||
5041 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | ||
5042 | } | ||
5043 | |||
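
ixgbe_atr() programs the signature filter with the flow tuple inverted, since the filter must match the packet as the receiver will see it for the return traffic to land on the paired Rx queue. A user-space model of just that inversion (types are illustrative):

    #include <stdint.h>

    struct atr_key {
            uint32_t src_ip, dst_ip;
            uint16_t src_port, dst_port;
    };

    static struct atr_key atr_key_from_tx(uint32_t saddr, uint32_t daddr,
                                          uint16_t sport, uint16_t dport)
    {
            struct atr_key key = {
                    .src_ip   = daddr,  /* receiver's source is our dest */
                    .dst_ip   = saddr,
                    .src_port = dport,
                    .dst_port = sport,
            };
            return key;
    }
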
4369 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | 5044 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, |
4370 | struct ixgbe_ring *tx_ring, int size) | 5045 | struct ixgbe_ring *tx_ring, int size) |
4371 | { | 5046 | { |
@@ -4400,6 +5075,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
4400 | { | 5075 | { |
4401 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 5076 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
4402 | 5077 | ||
5078 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
5079 | return smp_processor_id(); | ||
5080 | |||
4403 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 5081 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
4404 | return 0; /* All traffic should default to class 0 */ | 5082 | return 0; /* All traffic should default to class 0 */ |
4405 | 5083 | ||
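
With Flow Director hashing enabled, queue selection above simply returns the submitting CPU id, relying on a one-to-one CPU-to-queue layout so the ATR filter steers replies back to the same CPU. A toy model of the policy; the modulo guard is an addition here for safety, the driver itself returns the raw CPU id:

    static unsigned int pick_tx_queue(unsigned int cpu,
                                      unsigned int num_tx_queues,
                                      int fdir_hash_on)
    {
            if (fdir_hash_on)
                    return cpu % num_tx_queues;
            return 0;   /* other policies (DCB, skb hashing) follow */
    }
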
@@ -4433,10 +5111,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4433 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5111 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
4434 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5112 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
4435 | } | 5113 | } |
4436 | /* three things can cause us to need a context descriptor */ | 5114 | |
5115 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | ||
5116 | (skb->protocol == htons(ETH_P_FCOE))) | ||
5117 | tx_flags |= IXGBE_TX_FLAGS_FCOE; | ||
5118 | |||
5119 | /* four things can cause us to need a context descriptor */ | ||
4437 | if (skb_is_gso(skb) || | 5120 | if (skb_is_gso(skb) || |
4438 | (skb->ip_summed == CHECKSUM_PARTIAL) || | 5121 | (skb->ip_summed == CHECKSUM_PARTIAL) || |
4439 | (tx_flags & IXGBE_TX_FLAGS_VLAN)) | 5122 | (tx_flags & IXGBE_TX_FLAGS_VLAN) || |
5123 | (tx_flags & IXGBE_TX_FLAGS_FCOE)) | ||
4440 | count++; | 5124 | count++; |
4441 | 5125 | ||
4442 | count += TXD_USE_COUNT(skb_headlen(skb)); | 5126 | count += TXD_USE_COUNT(skb_headlen(skb)); |
@@ -4448,27 +5132,49 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4448 | return NETDEV_TX_BUSY; | 5132 | return NETDEV_TX_BUSY; |
4449 | } | 5133 | } |
4450 | 5134 | ||
4451 | if (skb->protocol == htons(ETH_P_IP)) | ||
4452 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | ||
4453 | first = tx_ring->next_to_use; | 5135 | first = tx_ring->next_to_use; |
4454 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); | 5136 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
4455 | if (tso < 0) { | 5137 | #ifdef IXGBE_FCOE |
4456 | dev_kfree_skb_any(skb); | 5138 | /* setup tx offload for FCoE */ |
4457 | return NETDEV_TX_OK; | 5139 | tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len); |
4458 | } | 5140 | if (tso < 0) { |
4459 | 5141 | dev_kfree_skb_any(skb); | |
4460 | if (tso) | 5142 | return NETDEV_TX_OK; |
4461 | tx_flags |= IXGBE_TX_FLAGS_TSO; | 5143 | } |
4462 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | 5144 | if (tso) |
4463 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 5145 | tx_flags |= IXGBE_TX_FLAGS_FSO; |
4464 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 5146 | #endif /* IXGBE_FCOE */ |
5147 | } else { | ||
5148 | if (skb->protocol == htons(ETH_P_IP)) | ||
5149 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | ||
5150 | tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); | ||
5151 | if (tso < 0) { | ||
5152 | dev_kfree_skb_any(skb); | ||
5153 | return NETDEV_TX_OK; | ||
5154 | } | ||
4465 | 5155 | ||
4466 | count = ixgbe_tx_map(adapter, tx_ring, skb, first); | 5156 | if (tso) |
5157 | tx_flags |= IXGBE_TX_FLAGS_TSO; | ||
5158 | else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && | ||
5159 | (skb->ip_summed == CHECKSUM_PARTIAL)) | ||
5160 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | ||
5161 | } | ||
4467 | 5162 | ||
5163 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); | ||
4468 | if (count) { | 5164 | if (count) { |
5165 | /* add the ATR filter if ATR is on */ | ||
5166 | if (tx_ring->atr_sample_rate) { | ||
5167 | ++tx_ring->atr_count; | ||
5168 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | ||
5169 | test_bit(__IXGBE_FDIR_INIT_DONE, | ||
5170 | &tx_ring->reinit_state)) { | ||
5171 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | ||
5172 | tx_flags); | ||
5173 | tx_ring->atr_count = 0; | ||
5174 | } | ||
5175 | } | ||
4469 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, | 5176 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, |
4470 | hdr_len); | 5177 | hdr_len); |
4471 | netdev->trans_start = jiffies; | ||
4472 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); | 5178 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); |
4473 | 5179 | ||
4474 | } else { | 5180 | } else { |
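
Filter programming in the hunk above is rate-limited: only every atr_sample_rate-th packet on a ring (re)installs a signature filter, and only once __IXGBE_FDIR_INIT_DONE is set, which bounds the per-packet cost of ATR. A minimal model of that sampling gate:

    #include <stdbool.h>

    struct atr_sampler {
            unsigned int count;
            unsigned int sample_rate;   /* 0 disables sampling */
    };

    static bool atr_should_sample(struct atr_sampler *s)
    {
            if (!s->sample_rate)
                    return false;
            if (++s->count >= s->sample_rate) {
                    s->count = 0;
                    return true;    /* caller programs the filter now */
            }
            return false;
    }
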
@@ -4519,6 +5225,82 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) | |||
4519 | return 0; | 5225 | return 0; |
4520 | } | 5226 | } |
4521 | 5227 | ||
5228 | static int | ||
5229 | ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) | ||
5230 | { | ||
5231 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
5232 | struct ixgbe_hw *hw = &adapter->hw; | ||
5233 | u16 value; | ||
5234 | int rc; | ||
5235 | |||
5236 | if (prtad != hw->phy.mdio.prtad) | ||
5237 | return -EINVAL; | ||
5238 | rc = hw->phy.ops.read_reg(hw, addr, devad, &value); | ||
5239 | if (!rc) | ||
5240 | rc = value; | ||
5241 | return rc; | ||
5242 | } | ||
5243 | |||
5244 | static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, | ||
5245 | u16 addr, u16 value) | ||
5246 | { | ||
5247 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
5248 | struct ixgbe_hw *hw = &adapter->hw; | ||
5249 | |||
5250 | if (prtad != hw->phy.mdio.prtad) | ||
5251 | return -EINVAL; | ||
5252 | return hw->phy.ops.write_reg(hw, addr, devad, value); | ||
5253 | } | ||
5254 | |||
5255 | static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) | ||
5256 | { | ||
5257 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
5258 | |||
5259 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); | ||
5260 | } | ||
5261 | |||
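
The MDIO wrappers above follow the mdio45 convention: a non-negative return carries the 16-bit register value, a negative return is an errno, which is why a zero rc from the read is overwritten with the value. A standalone sketch of that contract; read_reg() here is a stand-in for hw->phy.ops.read_reg:

    #include <errno.h>
    #include <stdint.h>

    static int read_reg(uint16_t addr, int devad, uint16_t *val)
    {
            (void)addr; (void)devad;
            *val = 0xabcd;          /* pretend hardware access succeeded */
            return 0;
    }

    static int mdio_read_model(int prtad, int expected_prtad,
                               int devad, uint16_t addr)
    {
            uint16_t value;
            int rc;

            if (prtad != expected_prtad)
                    return -EINVAL; /* request for a different PHY port */
            rc = read_reg(addr, devad, &value);
            if (!rc)
                    rc = value;     /* fold the value into the return */
            return rc;
    }
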
5262 | /** | ||
5263 | * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding | ||
5264 | * netdev->dev_addr_list | ||
5265 | * @dev: network interface device structure | ||
5266 | * | ||
5267 | * Returns non-zero on failure | ||
5268 | **/ | ||
5269 | static int ixgbe_add_sanmac_netdev(struct net_device *dev) | ||
5270 | { | ||
5271 | int err = 0; | ||
5272 | struct ixgbe_adapter *adapter = netdev_priv(dev); | ||
5273 | struct ixgbe_mac_info *mac = &adapter->hw.mac; | ||
5274 | |||
5275 | if (is_valid_ether_addr(mac->san_addr)) { | ||
5276 | rtnl_lock(); | ||
5277 | err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); | ||
5278 | rtnl_unlock(); | ||
5279 | } | ||
5280 | return err; | ||
5281 | } | ||
5282 | |||
5283 | /** | ||
5284 | * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding | ||
5285 | * netdev->dev_addr_list | ||
5286 | * @dev: network interface device structure | ||
5287 | * | ||
5288 | * Returns non-zero on failure | ||
5289 | **/ | ||
5290 | static int ixgbe_del_sanmac_netdev(struct net_device *dev) | ||
5291 | { | ||
5292 | int err = 0; | ||
5293 | struct ixgbe_adapter *adapter = netdev_priv(dev); | ||
5294 | struct ixgbe_mac_info *mac = &adapter->hw.mac; | ||
5295 | |||
5296 | if (is_valid_ether_addr(mac->san_addr)) { | ||
5297 | rtnl_lock(); | ||
5298 | err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); | ||
5299 | rtnl_unlock(); | ||
5300 | } | ||
5301 | return err; | ||
5302 | } | ||
5303 | |||
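
Both SAN MAC helpers wrap the dev_addr_add()/dev_addr_del() call in rtnl_lock()/rtnl_unlock(), because the netdev address lists are RTNL-protected, and both quietly do nothing when the EEPROM held no valid SAN address. A user-space analogue of that guarded-update shape, with a pthread mutex standing in for the RTNL lock and a callback standing in for the list mutation:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t rtnl_model = PTHREAD_MUTEX_INITIALIZER;

    static bool valid_ether_addr_model(const unsigned char mac[6])
    {
            bool nonzero = false;
            int i;

            for (i = 0; i < 6; i++)
                    nonzero |= mac[i] != 0;
            return nonzero && !(mac[0] & 1); /* non-zero and unicast */
    }

    static int update_san_addr(const unsigned char mac[6],
                               int (*update)(const unsigned char *))
    {
            int err = 0;

            if (valid_ether_addr_model(mac)) {
                    pthread_mutex_lock(&rtnl_model);   /* rtnl_lock() */
                    err = update(mac);
                    pthread_mutex_unlock(&rtnl_model); /* rtnl_unlock() */
            }
            return err;
    }
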
4522 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5304 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4523 | /* | 5305 | /* |
4524 | * Polling 'interrupt' - used by things like netconsole to send skbs | 5306 | * Polling 'interrupt' - used by things like netconsole to send skbs |
@@ -4552,9 +5334,14 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
4552 | .ndo_vlan_rx_register = ixgbe_vlan_rx_register, | 5334 | .ndo_vlan_rx_register = ixgbe_vlan_rx_register, |
4553 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, | 5335 | .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, |
4554 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, | 5336 | .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, |
5337 | .ndo_do_ioctl = ixgbe_ioctl, | ||
4555 | #ifdef CONFIG_NET_POLL_CONTROLLER | 5338 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4556 | .ndo_poll_controller = ixgbe_netpoll, | 5339 | .ndo_poll_controller = ixgbe_netpoll, |
4557 | #endif | 5340 | #endif |
5341 | #ifdef IXGBE_FCOE | ||
5342 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | ||
5343 | .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, | ||
5344 | #endif /* IXGBE_FCOE */ | ||
4558 | }; | 5345 | }; |
4559 | 5346 | ||
4560 | /** | 5347 | /** |
@@ -4577,9 +5364,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4577 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; | 5364 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; |
4578 | static int cards_found; | 5365 | static int cards_found; |
4579 | int i, err, pci_using_dac; | 5366 | int i, err, pci_using_dac; |
5367 | #ifdef IXGBE_FCOE | ||
5368 | u16 device_caps; | ||
5369 | #endif | ||
4580 | u32 part_num, eec; | 5370 | u32 part_num, eec; |
4581 | 5371 | ||
4582 | err = pci_enable_device(pdev); | 5372 | err = pci_enable_device_mem(pdev); |
4583 | if (err) | 5373 | if (err) |
4584 | return err; | 5374 | return err; |
4585 | 5375 | ||
@@ -4599,9 +5389,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4599 | pci_using_dac = 0; | 5389 | pci_using_dac = 0; |
4600 | } | 5390 | } |
4601 | 5391 | ||
4602 | err = pci_request_regions(pdev, ixgbe_driver_name); | 5392 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
5393 | IORESOURCE_MEM), ixgbe_driver_name); | ||
4603 | if (err) { | 5394 | if (err) { |
4604 | dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); | 5395 | dev_err(&pdev->dev, |
5396 | "pci_request_selected_regions failed 0x%x\n", err); | ||
4605 | goto err_pci_reg; | 5397 | goto err_pci_reg; |
4606 | } | 5398 | } |
4607 | 5399 | ||
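
The probe path now claims only the memory BARs: pci_enable_device_mem() enables MEM decoding without touching I/O ports, and pci_request_selected_regions() takes just the BAR mask that pci_select_bars() reports for IORESOURCE_MEM. A skeleton of that sequence using the standard PCI core API, shown out of context as a sketch rather than the driver's exact error unwinding:

    #include <linux/pci.h>

    static int probe_mem_only(struct pci_dev *pdev, const char *name)
    {
            int err, bars;

            err = pci_enable_device_mem(pdev); /* MEM decoding only */
            if (err)
                    return err;

            bars = pci_select_bars(pdev, IORESOURCE_MEM); /* MEM BAR mask */
            err = pci_request_selected_regions(pdev, bars, name);
            if (err)
                    pci_disable_device(pdev);
            return err;
    }
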
@@ -4665,6 +5457,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4665 | /* PHY */ | 5457 | /* PHY */ |
4666 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); | 5458 | memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); |
4667 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; | 5459 | hw->phy.sfp_type = ixgbe_sfp_type_unknown; |
5460 | /* ixgbe_identify_phy_generic will set prtad and mmds properly */ | ||
5461 | hw->phy.mdio.prtad = MDIO_PRTAD_NONE; | ||
5462 | hw->phy.mdio.mmds = 0; | ||
5463 | hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | ||
5464 | hw->phy.mdio.dev = netdev; | ||
5465 | hw->phy.mdio.mdio_read = ixgbe_mdio_read; | ||
5466 | hw->phy.mdio.mdio_write = ixgbe_mdio_write; | ||
4668 | 5467 | ||
4669 | /* set up this timer and work struct before calling get_invariants | 5468 | /* set up this timer and work struct before calling get_invariants |
4670 | * which might start the timer | 5469 | * which might start the timer |
@@ -4682,29 +5481,42 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4682 | INIT_WORK(&adapter->sfp_config_module_task, | 5481 | INIT_WORK(&adapter->sfp_config_module_task, |
4683 | ixgbe_sfp_config_module_task); | 5482 | ixgbe_sfp_config_module_task); |
4684 | 5483 | ||
4685 | err = ii->get_invariants(hw); | 5484 | ii->get_invariants(hw); |
4686 | if (err == IXGBE_ERR_SFP_NOT_PRESENT) { | ||
4687 | /* start a kernel thread to watch for a module to arrive */ | ||
4688 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
4689 | mod_timer(&adapter->sfp_timer, | ||
4690 | round_jiffies(jiffies + (2 * HZ))); | ||
4691 | err = 0; | ||
4692 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
4693 | DPRINTK(PROBE, ERR, "failed to load because an " | ||
4694 | "unsupported SFP+ module type was detected.\n"); | ||
4695 | goto err_hw_init; | ||
4696 | } else if (err) { | ||
4697 | goto err_hw_init; | ||
4698 | } | ||
4699 | 5485 | ||
4700 | /* setup the private structure */ | 5486 | /* setup the private structure */ |
4701 | err = ixgbe_sw_init(adapter); | 5487 | err = ixgbe_sw_init(adapter); |
4702 | if (err) | 5488 | if (err) |
4703 | goto err_sw_init; | 5489 | goto err_sw_init; |
4704 | 5490 | ||
5491 | /* | ||
5492 | * If there is a fan on this device and it has failed, log the | ||
5493 | * failure. | ||
5494 | */ | ||
5495 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | ||
5496 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
5497 | if (esdp & IXGBE_ESDP_SDP1) | ||
5498 | DPRINTK(PROBE, CRIT, | ||
5499 | "Fan has stopped, replace the adapter\n"); | ||
5500 | } | ||
5501 | |||
4705 | /* reset_hw fills in the perm_addr as well */ | 5502 | /* reset_hw fills in the perm_addr as well */ |
4706 | err = hw->mac.ops.reset_hw(hw); | 5503 | err = hw->mac.ops.reset_hw(hw); |
4707 | if (err) { | 5504 | if (err == IXGBE_ERR_SFP_NOT_PRESENT && |
5505 | hw->mac.type == ixgbe_mac_82598EB) { | ||
5506 | /* | ||
5507 | * Poll with a timer for an SFP+ module to arrive. | ||
5508 | * Only do this for 82598, since 82599 will generate | ||
5509 | * interrupts on module arrival. | ||
5510 | */ | ||
5511 | set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | ||
5512 | mod_timer(&adapter->sfp_timer, | ||
5513 | round_jiffies(jiffies + (2 * HZ))); | ||
5514 | err = 0; | ||
5515 | } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
5516 | dev_err(&adapter->pdev->dev, "failed to load because an " | ||
5517 | "unsupported SFP+ module type was detected.\n"); | ||
5518 | goto err_sw_init; | ||
5519 | } else if (err) { | ||
4708 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); | 5520 | dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); |
4709 | goto err_sw_init; | 5521 | goto err_sw_init; |
4710 | } | 5522 | } |
@@ -4720,6 +5532,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4720 | netdev->features |= NETIF_F_TSO6; | 5532 | netdev->features |= NETIF_F_TSO6; |
4721 | netdev->features |= NETIF_F_GRO; | 5533 | netdev->features |= NETIF_F_GRO; |
4722 | 5534 | ||
5535 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
5536 | netdev->features |= NETIF_F_SCTP_CSUM; | ||
5537 | |||
4723 | netdev->vlan_features |= NETIF_F_TSO; | 5538 | netdev->vlan_features |= NETIF_F_TSO; |
4724 | netdev->vlan_features |= NETIF_F_TSO6; | 5539 | netdev->vlan_features |= NETIF_F_TSO6; |
4725 | netdev->vlan_features |= NETIF_F_IP_CSUM; | 5540 | netdev->vlan_features |= NETIF_F_IP_CSUM; |
@@ -4732,9 +5547,32 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4732 | netdev->dcbnl_ops = &dcbnl_ops; | 5547 | netdev->dcbnl_ops = &dcbnl_ops; |
4733 | #endif | 5548 | #endif |
4734 | 5549 | ||
5550 | #ifdef IXGBE_FCOE | ||
5551 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
5552 | if (hw->mac.ops.get_device_caps) { | ||
5553 | hw->mac.ops.get_device_caps(hw, &device_caps); | ||
5554 | if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) { | ||
5555 | netdev->features |= NETIF_F_FCOE_CRC; | ||
5556 | netdev->features |= NETIF_F_FSO; | ||
5557 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | ||
5558 | DPRINTK(DRV, INFO, "FCoE enabled, " | ||
5559 | "disabling Flow Director\n"); | ||
5560 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
5561 | adapter->flags &= | ||
5562 | ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
5563 | adapter->atr_sample_rate = 0; | ||
5564 | } else { | ||
5565 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; | ||
5566 | } | ||
5567 | } | ||
5568 | } | ||
5569 | #endif /* IXGBE_FCOE */ | ||
4735 | if (pci_using_dac) | 5570 | if (pci_using_dac) |
4736 | netdev->features |= NETIF_F_HIGHDMA; | 5571 | netdev->features |= NETIF_F_HIGHDMA; |
4737 | 5572 | ||
5573 | if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) | ||
5574 | netdev->features |= NETIF_F_LRO; | ||
5575 | |||
4738 | /* make sure the EEPROM is good */ | 5576 | /* make sure the EEPROM is good */ |
4739 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { | 5577 | if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { |
4740 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); | 5578 | dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); |
@@ -4766,6 +5604,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4766 | case IXGBE_DEV_ID_82599_KX4: | 5604 | case IXGBE_DEV_ID_82599_KX4: |
4767 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | 5605 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | |
4768 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | 5606 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); |
5607 | /* Clear APM (legacy) wakeup so ACPI (PME) wakeup is used */ | ||
5608 | IXGBE_WRITE_REG(hw, IXGBE_GRC, | ||
5609 | (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME)); | ||
4769 | break; | 5610 | break; |
4770 | default: | 5611 | default: |
4771 | adapter->wol = 0; | 5612 | adapter->wol = 0; |
@@ -4774,6 +5615,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4774 | device_init_wakeup(&adapter->pdev->dev, true); | 5615 | device_init_wakeup(&adapter->pdev->dev, true); |
4775 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | 5616 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); |
4776 | 5617 | ||
5618 | /* pick up the PCI bus settings for reporting later */ | ||
5619 | hw->mac.ops.get_bus_info(hw); | ||
5620 | |||
4777 | /* print bus type/speed/width info */ | 5621 | /* print bus type/speed/width info */ |
4778 | dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", | 5622 | dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", |
4779 | ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": | 5623 | ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": |
@@ -4805,24 +5649,37 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4805 | hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); | 5649 | hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version); |
4806 | 5650 | ||
4807 | /* reset the hardware with the new settings */ | 5651 | /* reset the hardware with the new settings */ |
4808 | hw->mac.ops.start_hw(hw); | 5652 | err = hw->mac.ops.start_hw(hw); |
4809 | |||
4810 | netif_carrier_off(netdev); | ||
4811 | 5653 | ||
5654 | if (err == IXGBE_ERR_EEPROM_VERSION) { | ||
5655 | /* We are running on a pre-production device; log a warning */ | ||
5656 | dev_warn(&pdev->dev, "This device is a pre-production " | ||
5657 | "adapter/LOM. Please be aware there may be issues " | ||
5658 | "associated with your hardware. If you are " | ||
5659 | "experiencing problems please contact your Intel or " | ||
5660 | "hardware representative who provided you with this " | ||
5661 | "hardware.\n"); | ||
5662 | } | ||
4812 | strcpy(netdev->name, "eth%d"); | 5663 | strcpy(netdev->name, "eth%d"); |
4813 | err = register_netdev(netdev); | 5664 | err = register_netdev(netdev); |
4814 | if (err) | 5665 | if (err) |
4815 | goto err_register; | 5666 | goto err_register; |
4816 | 5667 | ||
5668 | /* carrier off reporting is important to ethtool even BEFORE open */ | ||
5669 | netif_carrier_off(netdev); | ||
5670 | |||
5671 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
5672 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
5673 | INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); | ||
5674 | |||
4817 | #ifdef CONFIG_IXGBE_DCA | 5675 | #ifdef CONFIG_IXGBE_DCA |
4818 | if (dca_add_requester(&pdev->dev) == 0) { | 5676 | if (dca_add_requester(&pdev->dev) == 0) { |
4819 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | 5677 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
4820 | /* always use CB2 mode, difference is masked | ||
4821 | * in the CB driver */ | ||
4822 | IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); | ||
4823 | ixgbe_setup_dca(adapter); | 5678 | ixgbe_setup_dca(adapter); |
4824 | } | 5679 | } |
4825 | #endif | 5680 | #endif |
5681 | /* add san mac addr to netdev */ | ||
5682 | ixgbe_add_sanmac_netdev(netdev); | ||
4826 | 5683 | ||
4827 | dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); | 5684 | dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); |
4828 | cards_found++; | 5685 | cards_found++; |
@@ -4830,9 +5687,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4830 | 5687 | ||
4831 | err_register: | 5688 | err_register: |
4832 | ixgbe_release_hw_control(adapter); | 5689 | ixgbe_release_hw_control(adapter); |
4833 | err_hw_init: | 5690 | ixgbe_clear_interrupt_scheme(adapter); |
4834 | err_sw_init: | 5691 | err_sw_init: |
4835 | ixgbe_reset_interrupt_capability(adapter); | ||
4836 | err_eeprom: | 5692 | err_eeprom: |
4837 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 5693 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
4838 | del_timer_sync(&adapter->sfp_timer); | 5694 | del_timer_sync(&adapter->sfp_timer); |
@@ -4843,7 +5699,8 @@ err_eeprom: | |||
4843 | err_ioremap: | 5699 | err_ioremap: |
4844 | free_netdev(netdev); | 5700 | free_netdev(netdev); |
4845 | err_alloc_etherdev: | 5701 | err_alloc_etherdev: |
4846 | pci_release_regions(pdev); | 5702 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
5703 | IORESOURCE_MEM)); | ||
4847 | err_pci_reg: | 5704 | err_pci_reg: |
4848 | err_dma: | 5705 | err_dma: |
4849 | pci_disable_device(pdev); | 5706 | pci_disable_device(pdev); |
@@ -4877,6 +5734,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
4877 | cancel_work_sync(&adapter->sfp_task); | 5734 | cancel_work_sync(&adapter->sfp_task); |
4878 | cancel_work_sync(&adapter->multispeed_fiber_task); | 5735 | cancel_work_sync(&adapter->multispeed_fiber_task); |
4879 | cancel_work_sync(&adapter->sfp_config_module_task); | 5736 | cancel_work_sync(&adapter->sfp_config_module_task); |
5737 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
5738 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
5739 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
4880 | flush_scheduled_work(); | 5740 | flush_scheduled_work(); |
4881 | 5741 | ||
4882 | #ifdef CONFIG_IXGBE_DCA | 5742 | #ifdef CONFIG_IXGBE_DCA |
@@ -4887,19 +5747,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
4887 | } | 5747 | } |
4888 | 5748 | ||
4889 | #endif | 5749 | #endif |
5750 | #ifdef IXGBE_FCOE | ||
5751 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | ||
5752 | ixgbe_cleanup_fcoe(adapter); | ||
5753 | |||
5754 | #endif /* IXGBE_FCOE */ | ||
5755 | |||
5756 | /* remove the added san mac */ | ||
5757 | ixgbe_del_sanmac_netdev(netdev); | ||
5758 | |||
4890 | if (netdev->reg_state == NETREG_REGISTERED) | 5759 | if (netdev->reg_state == NETREG_REGISTERED) |
4891 | unregister_netdev(netdev); | 5760 | unregister_netdev(netdev); |
4892 | 5761 | ||
4893 | ixgbe_reset_interrupt_capability(adapter); | 5762 | ixgbe_clear_interrupt_scheme(adapter); |
4894 | 5763 | ||
4895 | ixgbe_release_hw_control(adapter); | 5764 | ixgbe_release_hw_control(adapter); |
4896 | 5765 | ||
4897 | iounmap(adapter->hw.hw_addr); | 5766 | iounmap(adapter->hw.hw_addr); |
4898 | pci_release_regions(pdev); | 5767 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
5768 | IORESOURCE_MEM)); | ||
4899 | 5769 | ||
4900 | DPRINTK(PROBE, INFO, "complete\n"); | 5770 | DPRINTK(PROBE, INFO, "complete\n"); |
4901 | kfree(adapter->tx_ring); | ||
4902 | kfree(adapter->rx_ring); | ||
4903 | 5771 | ||
4904 | free_netdev(netdev); | 5772 | free_netdev(netdev); |
4905 | 5773 | ||
@@ -4927,6 +5795,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
4927 | 5795 | ||
4928 | netif_device_detach(netdev); | 5796 | netif_device_detach(netdev); |
4929 | 5797 | ||
5798 | if (state == pci_channel_io_perm_failure) | ||
5799 | return PCI_ERS_RESULT_DISCONNECT; | ||
5800 | |||
4930 | if (netif_running(netdev)) | 5801 | if (netif_running(netdev)) |
4931 | ixgbe_down(adapter); | 5802 | ixgbe_down(adapter); |
4932 | pci_disable_device(pdev); | 5803 | pci_disable_device(pdev); |
@@ -4948,7 +5819,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
4948 | pci_ers_result_t result; | 5819 | pci_ers_result_t result; |
4949 | int err; | 5820 | int err; |
4950 | 5821 | ||
4951 | if (pci_enable_device(pdev)) { | 5822 | if (pci_enable_device_mem(pdev)) { |
4952 | DPRINTK(PROBE, ERR, | 5823 | DPRINTK(PROBE, ERR, |
4953 | "Cannot re-enable PCI device after reset.\n"); | 5824 | "Cannot re-enable PCI device after reset.\n"); |
4954 | result = PCI_ERS_RESULT_DISCONNECT; | 5825 | result = PCI_ERS_RESULT_DISCONNECT; |
@@ -4956,8 +5827,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
4956 | pci_set_master(pdev); | 5827 | pci_set_master(pdev); |
4957 | pci_restore_state(pdev); | 5828 | pci_restore_state(pdev); |
4958 | 5829 | ||
4959 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5830 | pci_wake_from_d3(pdev, false); |
4960 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
4961 | 5831 | ||
4962 | ixgbe_reset(adapter); | 5832 | ixgbe_reset(adapter); |
4963 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | 5833 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
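
The slot-reset hunk above also swaps the pair of pci_enable_wake() calls for pci_wake_from_d3(), which covers both D3 states in one helper. For comparison, the replaced form was roughly:

    pci_enable_wake(pdev, PCI_D3hot, false);    /* what the old code did */
    pci_enable_wake(pdev, PCI_D3cold, false);
    /*
     * pci_wake_from_d3(pdev, false) expresses the same intent in one
     * call, with the PCI core choosing D3cold when the device can
     * signal PME from it and D3hot otherwise.
     */
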