diff options
| author | Sasha Neftin <sasha.neftin@intel.com> | 2018-10-11 03:17:16 -0400 |
|---|---|---|
| committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2018-10-17 16:16:19 -0400 |
| commit | 3df25e4c1e66a69097bde99990fb095b26125c82 (patch) | |
| tree | 815872ea09f7f7857dddc411509607c18bd003a0 /drivers/net | |
| parent | c9a11c23ceb65db7ecc5735e7428311d70e74ba9 (diff) | |
igc: Add interrupt support
This patch set adds interrupt support for the igc interfaces.
Signed-off-by: Sasha Neftin <sasha.neftin@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net')
| -rw-r--r-- | drivers/net/ethernet/intel/igc/igc.h | 127 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/igc/igc_defines.h | 40 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/igc/igc_hw.h | 84 | ||||
| -rw-r--r-- | drivers/net/ethernet/intel/igc/igc_main.c | 1016 |
4 files changed, 1267 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 2e819cac19e5..e595d135ea7b 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h | |||
| @@ -28,6 +28,17 @@ | |||
| 28 | extern char igc_driver_name[]; | 28 | extern char igc_driver_name[]; |
| 29 | extern char igc_driver_version[]; | 29 | extern char igc_driver_version[]; |
| 30 | 30 | ||
| 31 | /* Interrupt defines */ | ||
| 32 | #define IGC_START_ITR 648 /* ~6000 ints/sec */ | ||
| 33 | #define IGC_FLAG_HAS_MSI BIT(0) | ||
| 34 | #define IGC_FLAG_QUEUE_PAIRS BIT(4) | ||
| 35 | #define IGC_FLAG_HAS_MSIX BIT(13) | ||
| 36 | |||
| 37 | #define IGC_START_ITR 648 /* ~6000 ints/sec */ | ||
| 38 | #define IGC_4K_ITR 980 | ||
| 39 | #define IGC_20K_ITR 196 | ||
| 40 | #define IGC_70K_ITR 56 | ||
| 41 | |||
| 31 | /* Transmit and receive queues */ | 42 | /* Transmit and receive queues */ |
| 32 | #define IGC_MAX_RX_QUEUES 4 | 43 | #define IGC_MAX_RX_QUEUES 4 |
| 33 | #define IGC_MAX_TX_QUEUES 4 | 44 | #define IGC_MAX_TX_QUEUES 4 |
| @@ -42,10 +53,96 @@ enum igc_state_t { | |||
| 42 | __IGC_PTP_TX_IN_PROGRESS, | 53 | __IGC_PTP_TX_IN_PROGRESS, |
| 43 | }; | 54 | }; |
| 44 | 55 | ||
| 56 | struct igc_tx_queue_stats { | ||
| 57 | u64 packets; | ||
| 58 | u64 bytes; | ||
| 59 | u64 restart_queue; | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct igc_rx_queue_stats { | ||
| 63 | u64 packets; | ||
| 64 | u64 bytes; | ||
| 65 | u64 drops; | ||
| 66 | u64 csum_err; | ||
| 67 | u64 alloc_failed; | ||
| 68 | }; | ||
| 69 | |||
| 70 | struct igc_rx_packet_stats { | ||
| 71 | u64 ipv4_packets; /* IPv4 headers processed */ | ||
| 72 | u64 ipv4e_packets; /* IPv4E headers with extensions processed */ | ||
| 73 | u64 ipv6_packets; /* IPv6 headers processed */ | ||
| 74 | u64 ipv6e_packets; /* IPv6E headers with extensions processed */ | ||
| 75 | u64 tcp_packets; /* TCP headers processed */ | ||
| 76 | u64 udp_packets; /* UDP headers processed */ | ||
| 77 | u64 sctp_packets; /* SCTP headers processed */ | ||
| 78 | u64 nfs_packets; /* NFS headers processed */ | ||
| 79 | u64 other_packets; | ||
| 80 | }; | ||
| 81 | |||
| 82 | struct igc_ring_container { | ||
| 83 | struct igc_ring *ring; /* pointer to linked list of rings */ | ||
| 84 | unsigned int total_bytes; /* total bytes processed this int */ | ||
| 85 | unsigned int total_packets; /* total packets processed this int */ | ||
| 86 | u16 work_limit; /* total work allowed per interrupt */ | ||
| 87 | u8 count; /* total number of rings in vector */ | ||
| 88 | u8 itr; /* current ITR setting for ring */ | ||
| 89 | }; | ||
| 90 | |||
| 91 | struct igc_ring { | ||
| 92 | struct igc_q_vector *q_vector; /* backlink to q_vector */ | ||
| 93 | struct net_device *netdev; /* back pointer to net_device */ | ||
| 94 | struct device *dev; /* device for dma mapping */ | ||
| 95 | union { /* array of buffer info structs */ | ||
| 96 | struct igc_tx_buffer *tx_buffer_info; | ||
| 97 | struct igc_rx_buffer *rx_buffer_info; | ||
| 98 | }; | ||
| 99 | void *desc; /* descriptor ring memory */ | ||
| 100 | unsigned long flags; /* ring specific flags */ | ||
| 101 | void __iomem *tail; /* pointer to ring tail register */ | ||
| 102 | dma_addr_t dma; /* phys address of the ring */ | ||
| 103 | unsigned int size; /* length of desc. ring in bytes */ | ||
| 104 | |||
| 105 | u16 count; /* number of desc. in the ring */ | ||
| 106 | u8 queue_index; /* logical index of the ring */ | ||
| 107 | u8 reg_idx; /* physical index of the ring */ | ||
| 108 | |||
| 109 | /* everything past this point are written often */ | ||
| 110 | u16 next_to_clean; | ||
| 111 | u16 next_to_use; | ||
| 112 | u16 next_to_alloc; | ||
| 113 | |||
| 114 | union { | ||
| 115 | /* TX */ | ||
| 116 | struct { | ||
| 117 | struct igc_tx_queue_stats tx_stats; | ||
| 118 | }; | ||
| 119 | /* RX */ | ||
| 120 | struct { | ||
| 121 | struct igc_rx_queue_stats rx_stats; | ||
| 122 | struct igc_rx_packet_stats pkt_stats; | ||
| 123 | struct sk_buff *skb; | ||
| 124 | }; | ||
| 125 | }; | ||
| 126 | } ____cacheline_internodealigned_in_smp; | ||
| 127 | |||
| 45 | struct igc_q_vector { | 128 | struct igc_q_vector { |
| 46 | struct igc_adapter *adapter; /* backlink */ | 129 | struct igc_adapter *adapter; /* backlink */ |
| 130 | void __iomem *itr_register; | ||
| 131 | u32 eims_value; /* EIMS mask value */ | ||
| 132 | |||
| 133 | u16 itr_val; | ||
| 134 | u8 set_itr; | ||
| 135 | |||
| 136 | struct igc_ring_container rx, tx; | ||
| 47 | 137 | ||
| 48 | struct napi_struct napi; | 138 | struct napi_struct napi; |
| 139 | |||
| 140 | struct rcu_head rcu; /* to avoid race with update stats on free */ | ||
| 141 | char name[IFNAMSIZ + 9]; | ||
| 142 | struct net_device poll_dev; | ||
| 143 | |||
| 144 | /* for dynamic allocation of rings associated with this q_vector */ | ||
| 145 | struct igc_ring ring[0] ____cacheline_internodealigned_in_smp; | ||
| 49 | }; | 146 | }; |
| 50 | 147 | ||
| 51 | struct igc_mac_addr { | 148 | struct igc_mac_addr { |
| @@ -65,13 +162,35 @@ struct igc_adapter { | |||
| 65 | unsigned long state; | 162 | unsigned long state; |
| 66 | unsigned int flags; | 163 | unsigned int flags; |
| 67 | unsigned int num_q_vectors; | 164 | unsigned int num_q_vectors; |
| 165 | |||
| 166 | struct msix_entry *msix_entries; | ||
| 167 | |||
| 168 | /* TX */ | ||
| 169 | u16 tx_work_limit; | ||
| 170 | int num_tx_queues; | ||
| 171 | struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES]; | ||
| 172 | |||
| 173 | /* RX */ | ||
| 174 | int num_rx_queues; | ||
| 175 | struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES]; | ||
| 176 | |||
| 177 | struct timer_list watchdog_timer; | ||
| 178 | struct timer_list dma_err_timer; | ||
| 179 | struct timer_list phy_info_timer; | ||
| 180 | |||
| 68 | u16 link_speed; | 181 | u16 link_speed; |
| 69 | u16 link_duplex; | 182 | u16 link_duplex; |
| 70 | 183 | ||
| 71 | u8 port_num; | 184 | u8 port_num; |
| 72 | 185 | ||
| 73 | u8 __iomem *io_addr; | 186 | u8 __iomem *io_addr; |
| 187 | /* Interrupt Throttle Rate */ | ||
| 188 | u32 rx_itr_setting; | ||
| 189 | u32 tx_itr_setting; | ||
| 190 | |||
| 191 | struct work_struct reset_task; | ||
| 74 | struct work_struct watchdog_task; | 192 | struct work_struct watchdog_task; |
| 193 | struct work_struct dma_err_task; | ||
| 75 | 194 | ||
| 76 | int msg_enable; | 195 | int msg_enable; |
| 77 | u32 max_frame_size; | 196 | u32 max_frame_size; |
| @@ -81,8 +200,16 @@ struct igc_adapter { | |||
| 81 | 200 | ||
| 82 | /* structs defined in igc_hw.h */ | 201 | /* structs defined in igc_hw.h */ |
| 83 | struct igc_hw hw; | 202 | struct igc_hw hw; |
| 203 | struct igc_hw_stats stats; | ||
| 84 | 204 | ||
| 85 | struct igc_q_vector *q_vector[MAX_Q_VECTORS]; | 205 | struct igc_q_vector *q_vector[MAX_Q_VECTORS]; |
| 206 | u32 eims_enable_mask; | ||
| 207 | u32 eims_other; | ||
| 208 | |||
| 209 | u16 tx_ring_count; | ||
| 210 | u16 rx_ring_count; | ||
| 211 | |||
| 212 | u32 rss_queues; | ||
| 86 | 213 | ||
| 87 | struct igc_mac_addr *mac_table; | 214 | struct igc_mac_addr *mac_table; |
| 88 | }; | 215 | }; |
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index c25f75ed9cd4..7262ad44dcf8 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h | |||
| @@ -42,4 +42,44 @@ | |||
| 42 | #define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | 42 | #define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ |
| 43 | #define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | 43 | #define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ |
| 44 | 44 | ||
| 45 | /* Interrupt Cause Read */ | ||
| 46 | #define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */ | ||
| 47 | #define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */ | ||
| 48 | #define IGC_ICR_LSC BIT(2) /* Link Status Change */ | ||
| 49 | #define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */ | ||
| 50 | #define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */ | ||
| 51 | #define IGC_ICR_RXO BIT(6) /* Rx overrun */ | ||
| 52 | #define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */ | ||
| 53 | #define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */ | ||
| 54 | #define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ | ||
| 55 | |||
| 56 | #define IMS_ENABLE_MASK ( \ | ||
| 57 | IGC_IMS_RXT0 | \ | ||
| 58 | IGC_IMS_TXDW | \ | ||
| 59 | IGC_IMS_RXDMT0 | \ | ||
| 60 | IGC_IMS_RXSEQ | \ | ||
| 61 | IGC_IMS_LSC) | ||
| 62 | |||
| 63 | /* Interrupt Mask Set */ | ||
| 64 | #define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ | ||
| 65 | #define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ | ||
| 66 | #define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ | ||
| 67 | #define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ | ||
| 68 | #define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ | ||
| 69 | #define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ | ||
| 70 | #define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ | ||
| 71 | |||
| 72 | #define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ | ||
| 73 | #define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ | ||
| 74 | |||
| 75 | #define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ | ||
| 76 | #define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ | ||
| 77 | #define IGC_IVAR_VALID 0x80 | ||
| 78 | #define IGC_GPIE_NSICR 0x00000001 | ||
| 79 | #define IGC_GPIE_MSIX_MODE 0x00000010 | ||
| 80 | #define IGC_GPIE_EIAME 0x40000000 | ||
| 81 | #define IGC_GPIE_PBA 0x80000000 | ||
| 82 | |||
| 83 | #define IGC_N0_QUEUE -1 | ||
| 84 | |||
| 45 | #endif /* _IGC_DEFINES_H_ */ | 85 | #endif /* _IGC_DEFINES_H_ */ |
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 4cac2e8868e0..3905efb1bb1b 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h | |||
| @@ -85,6 +85,90 @@ struct igc_hw { | |||
| 85 | u8 revision_id; | 85 | u8 revision_id; |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | /* Statistics counters collected by the MAC */ | ||
| 89 | struct igc_hw_stats { | ||
| 90 | u64 crcerrs; | ||
| 91 | u64 algnerrc; | ||
| 92 | u64 symerrs; | ||
| 93 | u64 rxerrc; | ||
| 94 | u64 mpc; | ||
| 95 | u64 scc; | ||
| 96 | u64 ecol; | ||
| 97 | u64 mcc; | ||
| 98 | u64 latecol; | ||
| 99 | u64 colc; | ||
| 100 | u64 dc; | ||
| 101 | u64 tncrs; | ||
| 102 | u64 sec; | ||
| 103 | u64 cexterr; | ||
| 104 | u64 rlec; | ||
| 105 | u64 xonrxc; | ||
| 106 | u64 xontxc; | ||
| 107 | u64 xoffrxc; | ||
| 108 | u64 xofftxc; | ||
| 109 | u64 fcruc; | ||
| 110 | u64 prc64; | ||
| 111 | u64 prc127; | ||
| 112 | u64 prc255; | ||
| 113 | u64 prc511; | ||
| 114 | u64 prc1023; | ||
| 115 | u64 prc1522; | ||
| 116 | u64 gprc; | ||
| 117 | u64 bprc; | ||
| 118 | u64 mprc; | ||
| 119 | u64 gptc; | ||
| 120 | u64 gorc; | ||
| 121 | u64 gotc; | ||
| 122 | u64 rnbc; | ||
| 123 | u64 ruc; | ||
| 124 | u64 rfc; | ||
| 125 | u64 roc; | ||
| 126 | u64 rjc; | ||
| 127 | u64 mgprc; | ||
| 128 | u64 mgpdc; | ||
| 129 | u64 mgptc; | ||
| 130 | u64 tor; | ||
| 131 | u64 tot; | ||
| 132 | u64 tpr; | ||
| 133 | u64 tpt; | ||
| 134 | u64 ptc64; | ||
| 135 | u64 ptc127; | ||
| 136 | u64 ptc255; | ||
| 137 | u64 ptc511; | ||
| 138 | u64 ptc1023; | ||
| 139 | u64 ptc1522; | ||
| 140 | u64 mptc; | ||
| 141 | u64 bptc; | ||
| 142 | u64 tsctc; | ||
| 143 | u64 tsctfc; | ||
| 144 | u64 iac; | ||
| 145 | u64 icrxptc; | ||
| 146 | u64 icrxatc; | ||
| 147 | u64 ictxptc; | ||
| 148 | u64 ictxatc; | ||
| 149 | u64 ictxqec; | ||
| 150 | u64 ictxqmtc; | ||
| 151 | u64 icrxdmtc; | ||
| 152 | u64 icrxoc; | ||
| 153 | u64 cbtmpc; | ||
| 154 | u64 htdpmc; | ||
| 155 | u64 cbrdpc; | ||
| 156 | u64 cbrmpc; | ||
| 157 | u64 rpthc; | ||
| 158 | u64 hgptc; | ||
| 159 | u64 htcbdpc; | ||
| 160 | u64 hgorc; | ||
| 161 | u64 hgotc; | ||
| 162 | u64 lenerrs; | ||
| 163 | u64 scvpc; | ||
| 164 | u64 hrmpc; | ||
| 165 | u64 doosync; | ||
| 166 | u64 o2bgptc; | ||
| 167 | u64 o2bspc; | ||
| 168 | u64 b2ospc; | ||
| 169 | u64 b2ogprc; | ||
| 170 | }; | ||
| 171 | |||
| 88 | s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); | 172 | s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); |
| 89 | s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); | 173 | s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value); |
| 90 | void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); | 174 | void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value); |
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 7c5b0d2f16bf..0fd66620cfa1 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c | |||
| @@ -41,6 +41,22 @@ static int igc_sw_init(struct igc_adapter *); | |||
| 41 | static void igc_configure(struct igc_adapter *adapter); | 41 | static void igc_configure(struct igc_adapter *adapter); |
| 42 | static void igc_power_down_link(struct igc_adapter *adapter); | 42 | static void igc_power_down_link(struct igc_adapter *adapter); |
| 43 | static void igc_set_default_mac_filter(struct igc_adapter *adapter); | 43 | static void igc_set_default_mac_filter(struct igc_adapter *adapter); |
| 44 | static void igc_write_itr(struct igc_q_vector *q_vector); | ||
| 45 | static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector); | ||
| 46 | static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx); | ||
| 47 | static void igc_set_interrupt_capability(struct igc_adapter *adapter, | ||
| 48 | bool msix); | ||
| 49 | static void igc_free_q_vectors(struct igc_adapter *adapter); | ||
| 50 | static void igc_irq_disable(struct igc_adapter *adapter); | ||
| 51 | static void igc_irq_enable(struct igc_adapter *adapter); | ||
| 52 | static void igc_configure_msix(struct igc_adapter *adapter); | ||
| 53 | |||
| 54 | enum latency_range { | ||
| 55 | lowest_latency = 0, | ||
| 56 | low_latency = 1, | ||
| 57 | bulk_latency = 2, | ||
| 58 | latency_invalid = 255 | ||
| 59 | }; | ||
| 44 | 60 | ||
| 45 | static void igc_reset(struct igc_adapter *adapter) | 61 | static void igc_reset(struct igc_adapter *adapter) |
| 46 | { | 62 | { |
| @@ -154,6 +170,7 @@ static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
| 154 | */ | 170 | */ |
| 155 | static void igc_up(struct igc_adapter *adapter) | 171 | static void igc_up(struct igc_adapter *adapter) |
| 156 | { | 172 | { |
| 173 | struct igc_hw *hw = &adapter->hw; | ||
| 157 | int i = 0; | 174 | int i = 0; |
| 158 | 175 | ||
| 159 | /* hardware has been reset, we need to reload some things */ | 176 | /* hardware has been reset, we need to reload some things */ |
| @@ -163,6 +180,15 @@ static void igc_up(struct igc_adapter *adapter) | |||
| 163 | 180 | ||
| 164 | for (i = 0; i < adapter->num_q_vectors; i++) | 181 | for (i = 0; i < adapter->num_q_vectors; i++) |
| 165 | napi_enable(&adapter->q_vector[i]->napi); | 182 | napi_enable(&adapter->q_vector[i]->napi); |
| 183 | |||
| 184 | if (adapter->msix_entries) | ||
| 185 | igc_configure_msix(adapter); | ||
| 186 | else | ||
| 187 | igc_assign_vector(adapter->q_vector[0], 0); | ||
| 188 | |||
| 189 | /* Clear any pending interrupts. */ | ||
| 190 | rd32(IGC_ICR); | ||
| 191 | igc_irq_enable(adapter); | ||
| 166 | } | 192 | } |
| 167 | 193 | ||
| 168 | /** | 194 | /** |
| @@ -310,6 +336,958 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter) | |||
| 310 | } | 336 | } |
| 311 | 337 | ||
| 312 | /** | 338 | /** |
| 339 | * igc_msix_other - msix other interrupt handler | ||
| 340 | * @irq: interrupt number | ||
| 341 | * @data: pointer to a q_vector | ||
| 342 | */ | ||
| 343 | static irqreturn_t igc_msix_other(int irq, void *data) | ||
| 344 | { | ||
| 345 | struct igc_adapter *adapter = data; | ||
| 346 | struct igc_hw *hw = &adapter->hw; | ||
| 347 | u32 icr = rd32(IGC_ICR); | ||
| 348 | |||
| 349 | /* reading ICR causes bit 31 of EICR to be cleared */ | ||
| 350 | if (icr & IGC_ICR_DRSTA) | ||
| 351 | schedule_work(&adapter->reset_task); | ||
| 352 | |||
| 353 | if (icr & IGC_ICR_DOUTSYNC) { | ||
| 354 | /* HW is reporting DMA is out of sync */ | ||
| 355 | adapter->stats.doosync++; | ||
| 356 | } | ||
| 357 | |||
| 358 | if (icr & IGC_ICR_LSC) { | ||
| 359 | hw->mac.get_link_status = 1; | ||
| 360 | /* guard against interrupt when we're going down */ | ||
| 361 | if (!test_bit(__IGC_DOWN, &adapter->state)) | ||
| 362 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | ||
| 363 | } | ||
| 364 | |||
| 365 | wr32(IGC_EIMS, adapter->eims_other); | ||
| 366 | |||
| 367 | return IRQ_HANDLED; | ||
| 368 | } | ||
| 369 | |||
| 370 | /** | ||
| 371 | * igc_write_ivar - configure ivar for given MSI-X vector | ||
| 372 | * @hw: pointer to the HW structure | ||
| 373 | * @msix_vector: vector number we are allocating to a given ring | ||
| 374 | * @index: row index of IVAR register to write within IVAR table | ||
| 375 | * @offset: column offset of in IVAR, should be multiple of 8 | ||
| 376 | * | ||
| 377 | * The IVAR table consists of 2 columns, | ||
| 378 | * each containing an cause allocation for an Rx and Tx ring, and a | ||
| 379 | * variable number of rows depending on the number of queues supported. | ||
| 380 | */ | ||
| 381 | static void igc_write_ivar(struct igc_hw *hw, int msix_vector, | ||
| 382 | int index, int offset) | ||
| 383 | { | ||
| 384 | u32 ivar = array_rd32(IGC_IVAR0, index); | ||
| 385 | |||
| 386 | /* clear any bits that are currently set */ | ||
| 387 | ivar &= ~((u32)0xFF << offset); | ||
| 388 | |||
| 389 | /* write vector and valid bit */ | ||
| 390 | ivar |= (msix_vector | IGC_IVAR_VALID) << offset; | ||
| 391 | |||
| 392 | array_wr32(IGC_IVAR0, index, ivar); | ||
| 393 | } | ||
| 394 | |||
| 395 | static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) | ||
| 396 | { | ||
| 397 | struct igc_adapter *adapter = q_vector->adapter; | ||
| 398 | struct igc_hw *hw = &adapter->hw; | ||
| 399 | int rx_queue = IGC_N0_QUEUE; | ||
| 400 | int tx_queue = IGC_N0_QUEUE; | ||
| 401 | |||
| 402 | if (q_vector->rx.ring) | ||
| 403 | rx_queue = q_vector->rx.ring->reg_idx; | ||
| 404 | if (q_vector->tx.ring) | ||
| 405 | tx_queue = q_vector->tx.ring->reg_idx; | ||
| 406 | |||
| 407 | switch (hw->mac.type) { | ||
| 408 | case igc_i225: | ||
| 409 | if (rx_queue > IGC_N0_QUEUE) | ||
| 410 | igc_write_ivar(hw, msix_vector, | ||
| 411 | rx_queue >> 1, | ||
| 412 | (rx_queue & 0x1) << 4); | ||
| 413 | if (tx_queue > IGC_N0_QUEUE) | ||
| 414 | igc_write_ivar(hw, msix_vector, | ||
| 415 | tx_queue >> 1, | ||
| 416 | ((tx_queue & 0x1) << 4) + 8); | ||
| 417 | q_vector->eims_value = BIT(msix_vector); | ||
| 418 | break; | ||
| 419 | default: | ||
| 420 | WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); | ||
| 421 | break; | ||
| 422 | } | ||
| 423 | |||
| 424 | /* add q_vector eims value to global eims_enable_mask */ | ||
| 425 | adapter->eims_enable_mask |= q_vector->eims_value; | ||
| 426 | |||
| 427 | /* configure q_vector to set itr on first interrupt */ | ||
| 428 | q_vector->set_itr = 1; | ||
| 429 | } | ||
| 430 | |||
| 431 | /** | ||
| 432 | * igc_configure_msix - Configure MSI-X hardware | ||
| 433 | * @adapter: Pointer to adapter structure | ||
| 434 | * | ||
| 435 | * igc_configure_msix sets up the hardware to properly | ||
| 436 | * generate MSI-X interrupts. | ||
| 437 | */ | ||
| 438 | static void igc_configure_msix(struct igc_adapter *adapter) | ||
| 439 | { | ||
| 440 | struct igc_hw *hw = &adapter->hw; | ||
| 441 | int i, vector = 0; | ||
| 442 | u32 tmp; | ||
| 443 | |||
| 444 | adapter->eims_enable_mask = 0; | ||
| 445 | |||
| 446 | /* set vector for other causes, i.e. link changes */ | ||
| 447 | switch (hw->mac.type) { | ||
| 448 | case igc_i225: | ||
| 449 | /* Turn on MSI-X capability first, or our settings | ||
| 450 | * won't stick. And it will take days to debug. | ||
| 451 | */ | ||
| 452 | wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | | ||
| 453 | IGC_GPIE_PBA | IGC_GPIE_EIAME | | ||
| 454 | IGC_GPIE_NSICR); | ||
| 455 | |||
| 456 | /* enable msix_other interrupt */ | ||
| 457 | adapter->eims_other = BIT(vector); | ||
| 458 | tmp = (vector++ | IGC_IVAR_VALID) << 8; | ||
| 459 | |||
| 460 | wr32(IGC_IVAR_MISC, tmp); | ||
| 461 | break; | ||
| 462 | default: | ||
| 463 | /* do nothing, since nothing else supports MSI-X */ | ||
| 464 | break; | ||
| 465 | } /* switch (hw->mac.type) */ | ||
| 466 | |||
| 467 | adapter->eims_enable_mask |= adapter->eims_other; | ||
| 468 | |||
| 469 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
| 470 | igc_assign_vector(adapter->q_vector[i], vector++); | ||
| 471 | |||
| 472 | wrfl(); | ||
| 473 | } | ||
| 474 | |||
| 475 | static irqreturn_t igc_msix_ring(int irq, void *data) | ||
| 476 | { | ||
| 477 | struct igc_q_vector *q_vector = data; | ||
| 478 | |||
| 479 | /* Write the ITR value calculated from the previous interrupt. */ | ||
| 480 | igc_write_itr(q_vector); | ||
| 481 | |||
| 482 | napi_schedule(&q_vector->napi); | ||
| 483 | |||
| 484 | return IRQ_HANDLED; | ||
| 485 | } | ||
| 486 | |||
| 487 | /** | ||
| 488 | * igc_request_msix - Initialize MSI-X interrupts | ||
| 489 | * @adapter: Pointer to adapter structure | ||
| 490 | * | ||
| 491 | * igc_request_msix allocates MSI-X vectors and requests interrupts from the | ||
| 492 | * kernel. | ||
| 493 | */ | ||
| 494 | static int igc_request_msix(struct igc_adapter *adapter) | ||
| 495 | { | ||
| 496 | int i = 0, err = 0, vector = 0, free_vector = 0; | ||
| 497 | struct net_device *netdev = adapter->netdev; | ||
| 498 | |||
| 499 | err = request_irq(adapter->msix_entries[vector].vector, | ||
| 500 | &igc_msix_other, 0, netdev->name, adapter); | ||
| 501 | if (err) | ||
| 502 | goto err_out; | ||
| 503 | |||
| 504 | for (i = 0; i < adapter->num_q_vectors; i++) { | ||
| 505 | struct igc_q_vector *q_vector = adapter->q_vector[i]; | ||
| 506 | |||
| 507 | vector++; | ||
| 508 | |||
| 509 | q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); | ||
| 510 | |||
| 511 | if (q_vector->rx.ring && q_vector->tx.ring) | ||
| 512 | sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, | ||
| 513 | q_vector->rx.ring->queue_index); | ||
| 514 | else if (q_vector->tx.ring) | ||
| 515 | sprintf(q_vector->name, "%s-tx-%u", netdev->name, | ||
| 516 | q_vector->tx.ring->queue_index); | ||
| 517 | else if (q_vector->rx.ring) | ||
| 518 | sprintf(q_vector->name, "%s-rx-%u", netdev->name, | ||
| 519 | q_vector->rx.ring->queue_index); | ||
| 520 | else | ||
| 521 | sprintf(q_vector->name, "%s-unused", netdev->name); | ||
| 522 | |||
| 523 | err = request_irq(adapter->msix_entries[vector].vector, | ||
| 524 | igc_msix_ring, 0, q_vector->name, | ||
| 525 | q_vector); | ||
| 526 | if (err) | ||
| 527 | goto err_free; | ||
| 528 | } | ||
| 529 | |||
| 530 | igc_configure_msix(adapter); | ||
| 531 | return 0; | ||
| 532 | |||
| 533 | err_free: | ||
| 534 | /* free already assigned IRQs */ | ||
| 535 | free_irq(adapter->msix_entries[free_vector++].vector, adapter); | ||
| 536 | |||
| 537 | vector--; | ||
| 538 | for (i = 0; i < vector; i++) { | ||
| 539 | free_irq(adapter->msix_entries[free_vector++].vector, | ||
| 540 | adapter->q_vector[i]); | ||
| 541 | } | ||
| 542 | err_out: | ||
| 543 | return err; | ||
| 544 | } | ||
| 545 | |||
| 546 | /** | ||
| 547 | * igc_reset_q_vector - Reset config for interrupt vector | ||
| 548 | * @adapter: board private structure to initialize | ||
| 549 | * @v_idx: Index of vector to be reset | ||
| 550 | * | ||
| 551 | * If NAPI is enabled it will delete any references to the | ||
| 552 | * NAPI struct. This is preparation for igc_free_q_vector. | ||
| 553 | */ | ||
| 554 | static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) | ||
| 555 | { | ||
| 556 | struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
| 557 | |||
| 558 | /* if we're coming from igc_set_interrupt_capability, the vectors are | ||
| 559 | * not yet allocated | ||
| 560 | */ | ||
| 561 | if (!q_vector) | ||
| 562 | return; | ||
| 563 | |||
| 564 | if (q_vector->tx.ring) | ||
| 565 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; | ||
| 566 | |||
| 567 | if (q_vector->rx.ring) | ||
| 568 | adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; | ||
| 569 | |||
| 570 | netif_napi_del(&q_vector->napi); | ||
| 571 | } | ||
| 572 | |||
| 573 | static void igc_reset_interrupt_capability(struct igc_adapter *adapter) | ||
| 574 | { | ||
| 575 | int v_idx = adapter->num_q_vectors; | ||
| 576 | |||
| 577 | if (adapter->msix_entries) { | ||
| 578 | pci_disable_msix(adapter->pdev); | ||
| 579 | kfree(adapter->msix_entries); | ||
| 580 | adapter->msix_entries = NULL; | ||
| 581 | } else if (adapter->flags & IGC_FLAG_HAS_MSI) { | ||
| 582 | pci_disable_msi(adapter->pdev); | ||
| 583 | } | ||
| 584 | |||
| 585 | while (v_idx--) | ||
| 586 | igc_reset_q_vector(adapter, v_idx); | ||
| 587 | } | ||
| 588 | |||
| 589 | /** | ||
| 590 | * igc_clear_interrupt_scheme - reset the device to a state of no interrupts | ||
| 591 | * @adapter: Pointer to adapter structure | ||
| 592 | * | ||
| 593 | * This function resets the device so that it has 0 rx queues, tx queues, and | ||
| 594 | * MSI-X interrupts allocated. | ||
| 595 | */ | ||
| 596 | static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) | ||
| 597 | { | ||
| 598 | igc_free_q_vectors(adapter); | ||
| 599 | igc_reset_interrupt_capability(adapter); | ||
| 600 | } | ||
| 601 | |||
| 602 | /** | ||
| 603 | * igc_free_q_vectors - Free memory allocated for interrupt vectors | ||
| 604 | * @adapter: board private structure to initialize | ||
| 605 | * | ||
| 606 | * This function frees the memory allocated to the q_vectors. In addition if | ||
| 607 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
| 608 | * to freeing the q_vector. | ||
| 609 | */ | ||
| 610 | static void igc_free_q_vectors(struct igc_adapter *adapter) | ||
| 611 | { | ||
| 612 | int v_idx = adapter->num_q_vectors; | ||
| 613 | |||
| 614 | adapter->num_tx_queues = 0; | ||
| 615 | adapter->num_rx_queues = 0; | ||
| 616 | adapter->num_q_vectors = 0; | ||
| 617 | |||
| 618 | while (v_idx--) { | ||
| 619 | igc_reset_q_vector(adapter, v_idx); | ||
| 620 | igc_free_q_vector(adapter, v_idx); | ||
| 621 | } | ||
| 622 | } | ||
| 623 | |||
| 624 | /** | ||
| 625 | * igc_free_q_vector - Free memory allocated for specific interrupt vector | ||
| 626 | * @adapter: board private structure to initialize | ||
| 627 | * @v_idx: Index of vector to be freed | ||
| 628 | * | ||
| 629 | * This function frees the memory allocated to the q_vector. | ||
| 630 | */ | ||
| 631 | static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) | ||
| 632 | { | ||
| 633 | struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
| 634 | |||
| 635 | adapter->q_vector[v_idx] = NULL; | ||
| 636 | |||
| 637 | /* igc_get_stats64() might access the rings on this vector, | ||
| 638 | * we must wait a grace period before freeing it. | ||
| 639 | */ | ||
| 640 | if (q_vector) | ||
| 641 | kfree_rcu(q_vector, rcu); | ||
| 642 | } | ||
| 643 | |||
| 644 | /** | ||
| 645 | * igc_update_ring_itr - update the dynamic ITR value based on packet size | ||
| 646 | * @q_vector: pointer to q_vector | ||
| 647 | * | ||
| 648 | * Stores a new ITR value based on strictly on packet size. This | ||
| 649 | * algorithm is less sophisticated than that used in igc_update_itr, | ||
| 650 | * due to the difficulty of synchronizing statistics across multiple | ||
| 651 | * receive rings. The divisors and thresholds used by this function | ||
| 652 | * were determined based on theoretical maximum wire speed and testing | ||
| 653 | * data, in order to minimize response time while increasing bulk | ||
| 654 | * throughput. | ||
| 655 | * NOTE: This function is called only when operating in a multiqueue | ||
| 656 | * receive environment. | ||
| 657 | */ | ||
| 658 | static void igc_update_ring_itr(struct igc_q_vector *q_vector) | ||
| 659 | { | ||
| 660 | struct igc_adapter *adapter = q_vector->adapter; | ||
| 661 | int new_val = q_vector->itr_val; | ||
| 662 | int avg_wire_size = 0; | ||
| 663 | unsigned int packets; | ||
| 664 | |||
| 665 | /* For non-gigabit speeds, just fix the interrupt rate at 4000 | ||
| 666 | * ints/sec - ITR timer value of 120 ticks. | ||
| 667 | */ | ||
| 668 | switch (adapter->link_speed) { | ||
| 669 | case SPEED_10: | ||
| 670 | case SPEED_100: | ||
| 671 | new_val = IGC_4K_ITR; | ||
| 672 | goto set_itr_val; | ||
| 673 | default: | ||
| 674 | break; | ||
| 675 | } | ||
| 676 | |||
| 677 | packets = q_vector->rx.total_packets; | ||
| 678 | if (packets) | ||
| 679 | avg_wire_size = q_vector->rx.total_bytes / packets; | ||
| 680 | |||
| 681 | packets = q_vector->tx.total_packets; | ||
| 682 | if (packets) | ||
| 683 | avg_wire_size = max_t(u32, avg_wire_size, | ||
| 684 | q_vector->tx.total_bytes / packets); | ||
| 685 | |||
| 686 | /* if avg_wire_size isn't set no work was done */ | ||
| 687 | if (!avg_wire_size) | ||
| 688 | goto clear_counts; | ||
| 689 | |||
| 690 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ | ||
| 691 | avg_wire_size += 24; | ||
| 692 | |||
| 693 | /* Don't starve jumbo frames */ | ||
| 694 | avg_wire_size = min(avg_wire_size, 3000); | ||
| 695 | |||
| 696 | /* Give a little boost to mid-size frames */ | ||
| 697 | if (avg_wire_size > 300 && avg_wire_size < 1200) | ||
| 698 | new_val = avg_wire_size / 3; | ||
| 699 | else | ||
| 700 | new_val = avg_wire_size / 2; | ||
| 701 | |||
| 702 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
| 703 | if (new_val < IGC_20K_ITR && | ||
| 704 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || | ||
| 705 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) | ||
| 706 | new_val = IGC_20K_ITR; | ||
| 707 | |||
| 708 | set_itr_val: | ||
| 709 | if (new_val != q_vector->itr_val) { | ||
| 710 | q_vector->itr_val = new_val; | ||
| 711 | q_vector->set_itr = 1; | ||
| 712 | } | ||
| 713 | clear_counts: | ||
| 714 | q_vector->rx.total_bytes = 0; | ||
| 715 | q_vector->rx.total_packets = 0; | ||
| 716 | q_vector->tx.total_bytes = 0; | ||
| 717 | q_vector->tx.total_packets = 0; | ||
| 718 | } | ||
| 719 | |||
| 786 | |||
| 787 | static void igc_set_itr(struct igc_q_vector *q_vector) | ||
| 788 | { | ||
| 789 | struct igc_adapter *adapter = q_vector->adapter; | ||
| 790 | u32 new_itr = q_vector->itr_val; | ||
| 791 | u8 current_itr = 0; | ||
| 792 | |||
| 793 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | ||
| 794 | switch (adapter->link_speed) { | ||
| 795 | case SPEED_10: | ||
| 796 | case SPEED_100: | ||
| 797 | current_itr = 0; | ||
| 798 | new_itr = IGC_4K_ITR; | ||
| 799 | goto set_itr_now; | ||
| 800 | default: | ||
| 801 | break; | ||
| 802 | } | ||
| 803 | |||
| 804 | igc_update_itr(q_vector, &q_vector->tx); | ||
| 805 | igc_update_itr(q_vector, &q_vector->rx); | ||
| 806 | |||
| 807 | current_itr = max(q_vector->rx.itr, q_vector->tx.itr); | ||
| 808 | |||
| 809 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | ||
| 810 | if (current_itr == lowest_latency && | ||
| 811 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || | ||
| 812 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) | ||
| 813 | current_itr = low_latency; | ||
| 814 | |||
| 815 | switch (current_itr) { | ||
| 816 | /* counts and packets in update_itr are dependent on these numbers */ | ||
| 817 | case lowest_latency: | ||
| 818 | new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ | ||
| 819 | break; | ||
| 820 | case low_latency: | ||
| 821 | new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ | ||
| 822 | break; | ||
| 823 | case bulk_latency: | ||
| 824 | new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ | ||
| 825 | break; | ||
| 826 | default: | ||
| 827 | break; | ||
| 828 | } | ||
| 829 | |||
| 830 | set_itr_now: | ||
| 831 | if (new_itr != q_vector->itr_val) { | ||
| 832 | /* this attempts to bias the interrupt rate towards Bulk | ||
| 833 | * by adding intermediate steps when interrupt rate is | ||
| 834 | * increasing | ||
| 835 | */ | ||
| 836 | new_itr = new_itr > q_vector->itr_val ? | ||
| 837 | max((new_itr * q_vector->itr_val) / | ||
| 838 | (new_itr + (q_vector->itr_val >> 2)), | ||
| 839 | new_itr) : new_itr; | ||
| 840 | /* Don't write the value here; it resets the adapter's | ||
| 841 | * internal timer, and causes us to delay far longer than | ||
| 842 | * we should between interrupts. Instead, we write the ITR | ||
| 843 | * value at the beginning of the next interrupt so the timing | ||
| 844 | * ends up being correct. | ||
| 845 | */ | ||
| 846 | q_vector->itr_val = new_itr; | ||
| 847 | q_vector->set_itr = 1; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | |||
/* Re-arm the interrupt for @q_vector after a NAPI poll completes.
 * If the queue uses dynamic moderation, recompute the ITR from the
 * traffic counters first.
 */
static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;

	/* low two bits of the itr setting select dynamic moderation */
	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if (adapter->num_q_vectors == 1)
			igc_set_itr(q_vector);
		else
			igc_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		/* MSI-X: unmask only this vector; otherwise re-enable all */
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}
| 871 | |||
/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
static int igc_poll(struct napi_struct *napi, int budget)
{
	struct igc_q_vector *q_vector = container_of(napi,
						     struct igc_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;
	int cleaned = 0;

	/* NOTE(review): 'cleaned' is never updated in this function, so
	 * work_done stays 0 and clean_complete stays true - presumably a
	 * placeholder until an Rx cleanup routine is wired in; confirm
	 * against follow-up patches.
	 */
	if (q_vector->rx.ring) {
		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	igc_ring_irq_enable(q_vector);

	return 0;
}
| 902 | |||
/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: attempt MSI-X first when true
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.  Falls back to a
 * single-queue MSI (or legacy INTx if MSI also fails) configuration
 * when MSI-X cannot be used.
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;

	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;

	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	/* NOTE(review): on kcalloc failure we return with
	 * IGC_FLAG_HAS_MSIX still set and msix_entries == NULL - confirm
	 * callers tolerate that combination.
	 */
	if (!adapter->msix_entries)
		return;

	/* populate entry values */
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	/* min == max: require the exact vector count, no partial grant */
	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;

	/* single queue pair on one vector; prefer MSI over legacy INTx */
	adapter->rss_queues = 1;
	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}
| 971 | |||
| 972 | static void igc_add_ring(struct igc_ring *ring, | ||
| 973 | struct igc_ring_container *head) | ||
| 974 | { | ||
| 975 | head->ring = ring; | ||
| 976 | head->count++; | ||
| 977 | } | ||
| 978 | |||
/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 *           (not referenced in this implementation)
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
			      unsigned int v_count, unsigned int v_idx,
			      unsigned int txr_count, unsigned int txr_idx,
			      unsigned int rxr_count, unsigned int rxr_idx)
{
	struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count, size;

	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	/* the q_vector and its rings live in one contiguous allocation */
	ring_count = txr_count + rxr_count;
	size = sizeof(struct igc_q_vector) +
		(sizeof(struct igc_ring) * ring_count);

	/* allocate q_vector and rings; an existing allocation at this
	 * index is reused after being zeroed
	 */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	else
		memset(q_vector, 0, size);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI with the default 64-packet weight */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igc_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration
	 * NOTE(review): every vector is pointed at EITR(0) here -
	 * presumably remapped per vector elsewhere; confirm.
	 */
	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
	q_vector->itr_val = IGC_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR: a setting of 0 or >3 is copied verbatim into
	 * itr_val; settings 1-3 leave the IGC_START_ITR default in place
	 */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igc_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igc_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}
| 1089 | |||
/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int igc_alloc_q_vectors(struct igc_adapter *adapter)
{
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int q_vectors = adapter->num_q_vectors;
	int err;

	/* enough vectors for every queue: give each Rx queue its own
	 * Rx-only vector first
	 */
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	/* spread remaining Tx (and any leftover Rx) queues evenly over
	 * the remaining vectors
	 */
	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	/* unwind: zero the queue/vector counts and free what was built */
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igc_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
| 1148 | |||
| 1149 | /** | ||
| 1150 | * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors | ||
| 1151 | * @adapter: Pointer to adapter structure | ||
| 1152 | * | ||
| 1153 | * This function initializes the interrupts and allocates all of the queues. | ||
| 1154 | */ | ||
| 1155 | static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) | ||
| 1156 | { | ||
| 1157 | struct pci_dev *pdev = adapter->pdev; | ||
| 1158 | int err = 0; | ||
| 1159 | |||
| 1160 | igc_set_interrupt_capability(adapter, msix); | ||
| 1161 | |||
| 1162 | err = igc_alloc_q_vectors(adapter); | ||
| 1163 | if (err) { | ||
| 1164 | dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); | ||
| 1165 | goto err_alloc_q_vectors; | ||
| 1166 | } | ||
| 1167 | |||
| 1168 | return 0; | ||
| 1169 | |||
| 1170 | err_alloc_q_vectors: | ||
| 1171 | igc_reset_interrupt_capability(adapter); | ||
| 1172 | return err; | ||
| 1173 | } | ||
| 1174 | |||
| 1175 | static void igc_free_irq(struct igc_adapter *adapter) | ||
| 1176 | { | ||
| 1177 | if (adapter->msix_entries) { | ||
| 1178 | int vector = 0, i; | ||
| 1179 | |||
| 1180 | free_irq(adapter->msix_entries[vector++].vector, adapter); | ||
| 1181 | |||
| 1182 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
| 1183 | free_irq(adapter->msix_entries[vector++].vector, | ||
| 1184 | adapter->q_vector[i]); | ||
| 1185 | } else { | ||
| 1186 | free_irq(adapter->pdev->irq, adapter); | ||
| 1187 | } | ||
| 1188 | } | ||
| 1189 | |||
/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	/* MSI-X: drop the enabled vectors from the auto-mask (EIAM) and
	 * auto-clear (EIAC) registers and mask their causes via EIMC
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(IGC_EIAM);

		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(IGC_EIMC, adapter->eims_enable_mask);
		regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	/* mask all legacy causes and flush the posted writes */
	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	/* wait for any in-flight handler on every vector to finish */
	if (adapter->msix_entries) {
		int vector = 0, i;

		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
| 1222 | |||
/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void igc_irq_enable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		/* MSI-X: enable auto-clear/auto-mask for the queue
		 * vectors, unmask them, and take link-change, DMA
		 * out-of-sync and device-reset causes on IMS
		 */
		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
		u32 regval = rd32(IGC_EIAC);

		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(IGC_EIAM);
		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		/* MSI/legacy: default cause set plus device reset */
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
	}
}
| 1245 | |||
/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	int err = 0;

	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		igc_configure(adapter);
	}

	/* NOTE(review): no request_irq() is issued here for the
	 * MSI/legacy paths, yet igc_free_irq() frees pdev->irq in the
	 * non-MSI-X case - confirm the non-MSI-X handler is registered
	 * elsewhere (or in a follow-up patch).
	 */
request_done:
	return err;
}
| 1273 | |||
| 1274 | static void igc_write_itr(struct igc_q_vector *q_vector) | ||
| 1275 | { | ||
| 1276 | u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; | ||
| 1277 | |||
| 1278 | if (!q_vector->set_itr) | ||
| 1279 | return; | ||
| 1280 | |||
| 1281 | if (!itr_val) | ||
| 1282 | itr_val = IGC_ITR_VAL_MASK; | ||
| 1283 | |||
| 1284 | itr_val |= IGC_EITR_CNT_IGNR; | ||
| 1285 | |||
| 1286 | writel(itr_val, q_vector->itr_register); | ||
| 1287 | q_vector->set_itr = 0; | ||
| 1288 | } | ||
| 1289 | |||
| 1290 | /** | ||
| 313 | * igc_open - Called when a network interface is made active | 1291 | * igc_open - Called when a network interface is made active |
| 314 | * @netdev: network interface device structure | 1292 | * @netdev: network interface device structure |
| 315 | * | 1293 | * |
| @@ -325,6 +1303,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) | |||
| 325 | { | 1303 | { |
| 326 | struct igc_adapter *adapter = netdev_priv(netdev); | 1304 | struct igc_adapter *adapter = netdev_priv(netdev); |
| 327 | struct igc_hw *hw = &adapter->hw; | 1305 | struct igc_hw *hw = &adapter->hw; |
| 1306 | int err = 0; | ||
| 328 | int i = 0; | 1307 | int i = 0; |
| 329 | 1308 | ||
| 330 | /* disallow open during test */ | 1309 | /* disallow open during test */ |
| @@ -340,15 +1319,40 @@ static int __igc_open(struct net_device *netdev, bool resuming) | |||
| 340 | 1319 | ||
| 341 | igc_configure(adapter); | 1320 | igc_configure(adapter); |
| 342 | 1321 | ||
| 1322 | err = igc_request_irq(adapter); | ||
| 1323 | if (err) | ||
| 1324 | goto err_req_irq; | ||
| 1325 | |||
| 1326 | /* Notify the stack of the actual queue counts. */ | ||
| 1327 | netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); | ||
| 1328 | if (err) | ||
| 1329 | goto err_set_queues; | ||
| 1330 | |||
| 1331 | err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); | ||
| 1332 | if (err) | ||
| 1333 | goto err_set_queues; | ||
| 1334 | |||
| 343 | clear_bit(__IGC_DOWN, &adapter->state); | 1335 | clear_bit(__IGC_DOWN, &adapter->state); |
| 344 | 1336 | ||
| 345 | for (i = 0; i < adapter->num_q_vectors; i++) | 1337 | for (i = 0; i < adapter->num_q_vectors; i++) |
| 346 | napi_enable(&adapter->q_vector[i]->napi); | 1338 | napi_enable(&adapter->q_vector[i]->napi); |
| 347 | 1339 | ||
| 1340 | /* Clear any pending interrupts. */ | ||
| 1341 | rd32(IGC_ICR); | ||
| 1342 | igc_irq_enable(adapter); | ||
| 1343 | |||
| 348 | /* start the watchdog. */ | 1344 | /* start the watchdog. */ |
| 349 | hw->mac.get_link_status = 1; | 1345 | hw->mac.get_link_status = 1; |
| 350 | 1346 | ||
| 351 | return IGC_SUCCESS; | 1347 | return IGC_SUCCESS; |
| 1348 | |||
| 1349 | err_set_queues: | ||
| 1350 | igc_free_irq(adapter); | ||
| 1351 | err_req_irq: | ||
| 1352 | igc_release_hw_control(adapter); | ||
| 1353 | igc_power_down_link(adapter); | ||
| 1354 | |||
| 1355 | return err; | ||
| 352 | } | 1356 | } |
| 353 | 1357 | ||
| 354 | static int igc_open(struct net_device *netdev) | 1358 | static int igc_open(struct net_device *netdev) |
| @@ -377,6 +1381,8 @@ static int __igc_close(struct net_device *netdev, bool suspending) | |||
| 377 | 1381 | ||
| 378 | igc_release_hw_control(adapter); | 1382 | igc_release_hw_control(adapter); |
| 379 | 1383 | ||
| 1384 | igc_free_irq(adapter); | ||
| 1385 | |||
| 380 | return 0; | 1386 | return 0; |
| 381 | } | 1387 | } |
| 382 | 1388 | ||
| @@ -595,6 +1601,8 @@ static int igc_probe(struct pci_dev *pdev, | |||
| 595 | err_register: | 1601 | err_register: |
| 596 | igc_release_hw_control(adapter); | 1602 | igc_release_hw_control(adapter); |
| 597 | err_sw_init: | 1603 | err_sw_init: |
| 1604 | igc_clear_interrupt_scheme(adapter); | ||
| 1605 | iounmap(adapter->io_addr); | ||
| 598 | err_ioremap: | 1606 | err_ioremap: |
| 599 | free_netdev(netdev); | 1607 | free_netdev(netdev); |
| 600 | err_alloc_etherdev: | 1608 | err_alloc_etherdev: |
| @@ -672,6 +1680,14 @@ static int igc_sw_init(struct igc_adapter *adapter) | |||
| 672 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + | 1680 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + |
| 673 | VLAN_HLEN; | 1681 | VLAN_HLEN; |
| 674 | 1682 | ||
| 1683 | if (igc_init_interrupt_scheme(adapter, true)) { | ||
| 1684 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | ||
| 1685 | return -ENOMEM; | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | /* Explicitly disable IRQ since the NIC can be in any state. */ | ||
| 1689 | igc_irq_disable(adapter); | ||
| 1690 | |||
| 675 | set_bit(__IGC_DOWN, &adapter->state); | 1691 | set_bit(__IGC_DOWN, &adapter->state); |
| 676 | 1692 | ||
| 677 | return 0; | 1693 | return 0; |
