path: root/drivers/net/igb/igb_main.c
Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c  3273
1 files changed, 1789 insertions, 1484 deletions
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44e..0cab5e2b089 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -63,6 +63,7 @@ static const struct e1000_info *igb_info_tbl[] = {
 static struct pci_device_id igb_pci_tbl[] = {
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+    { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
     { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
@@ -81,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
 void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
@@ -89,7 +91,6 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
-static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
@@ -98,28 +99,22 @@ static void igb_set_rx_mode(struct net_device *);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-                                           struct net_device *,
-                                           struct igb_ring *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
-                                      struct net_device *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
-static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -127,57 +122,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
127static void igb_vlan_rx_add_vid(struct net_device *, u16); 122static void igb_vlan_rx_add_vid(struct net_device *, u16);
128static void igb_vlan_rx_kill_vid(struct net_device *, u16); 123static void igb_vlan_rx_kill_vid(struct net_device *, u16);
129static void igb_restore_vlan(struct igb_adapter *); 124static void igb_restore_vlan(struct igb_adapter *);
125static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
130static void igb_ping_all_vfs(struct igb_adapter *); 126static void igb_ping_all_vfs(struct igb_adapter *);
131static void igb_msg_task(struct igb_adapter *); 127static void igb_msg_task(struct igb_adapter *);
132static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
133static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
134static void igb_vmm_control(struct igb_adapter *); 128static void igb_vmm_control(struct igb_adapter *);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 129static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 130static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
137 131
138static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
139{
140 u32 reg_data;
141
142 reg_data = rd32(E1000_VMOLR(vfn));
143 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
144 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
145 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
146 E1000_VMOLR_AUPE | /* Accept untagged packets */
147 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
148 wr32(E1000_VMOLR(vfn), reg_data);
149}
150
151static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
152 int vfn)
153{
154 struct e1000_hw *hw = &adapter->hw;
155 u32 vmolr;
156
157 /* if it isn't the PF check to see if VFs are enabled and
158 * increase the size to support vlan tags */
159 if (vfn < adapter->vfs_allocated_count &&
160 adapter->vf_data[vfn].vlans_enabled)
161 size += VLAN_TAG_SIZE;
162
163 vmolr = rd32(E1000_VMOLR(vfn));
164 vmolr &= ~E1000_VMOLR_RLPML_MASK;
165 vmolr |= size | E1000_VMOLR_LPE;
166 wr32(E1000_VMOLR(vfn), vmolr);
167
168 return 0;
169}
170
171static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
172{
173 u32 reg_data;
174
175 reg_data = rd32(E1000_RAH(entry));
176 reg_data &= ~E1000_RAH_POOL_MASK;
177 reg_data |= E1000_RAH_POOL_1 << pool;;
178 wr32(E1000_RAH(entry), reg_data);
179}
180
181#ifdef CONFIG_PM 132#ifdef CONFIG_PM
182static int igb_suspend(struct pci_dev *, pm_message_t); 133static int igb_suspend(struct pci_dev *, pm_message_t);
183static int igb_resume(struct pci_dev *); 134static int igb_resume(struct pci_dev *);
@@ -228,46 +179,12 @@ static struct pci_driver igb_driver = {
228 .err_handler = &igb_err_handler 179 .err_handler = &igb_err_handler
229}; 180};
230 181
231static int global_quad_port_a; /* global quad port a indication */
232
233MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 182MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
234MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 183MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
235MODULE_LICENSE("GPL"); 184MODULE_LICENSE("GPL");
236MODULE_VERSION(DRV_VERSION); 185MODULE_VERSION(DRV_VERSION);
237 186
238/** 187/**
239 * Scale the NIC clock cycle by a large factor so that
240 * relatively small clock corrections can be added or
241 * substracted at each clock tick. The drawbacks of a
242 * large factor are a) that the clock register overflows
243 * more quickly (not such a big deal) and b) that the
244 * increment per tick has to fit into 24 bits.
245 *
246 * Note that
247 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
248 * IGB_TSYNC_SCALE
249 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
250 *
251 * The base scale factor is intentionally a power of two
252 * so that the division in %struct timecounter can be done with
253 * a shift.
254 */
255#define IGB_TSYNC_SHIFT (19)
256#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
257
258/**
259 * The duration of one clock cycle of the NIC.
260 *
261 * @todo This hard-coded value is part of the specification and might change
262 * in future hardware revisions. Add revision check.
263 */
264#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
265
266#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
267# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
268#endif
269
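For reference, the constants removed above pin the NIC clock scale to 2^19 and a 16 ns cycle time, so the programmed increment is 16 * 2^19 = 0x800000, which is exactly what the removed #error guarded against overflowing the 24-bit TIMINCA field. A standalone C sketch of that arithmetic (illustrative only, not part of this patch):

#include <stdio.h>

/* Constants from the comment block removed above. */
#define IGB_TSYNC_SHIFT 19
#define IGB_TSYNC_SCALE (1u << IGB_TSYNC_SHIFT)          /* 524288 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

int main(void)
{
    /* Increment added to SYSTIM each 16 ns tick: 16 * 2^19 = 0x800000. */
    unsigned int timinca = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE;

    /* The removed #error enforced that this fits the 24-bit TIMINCA field. */
    printf("increment = %u (0x%06x), fits in 24 bits: %s\n",
           timinca, timinca, timinca < (1u << 24) ? "yes" : "no");
    return 0;
}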
270/**
271 * igb_read_clock - read raw cycle counter (to be used by time counter) 188 * igb_read_clock - read raw cycle counter (to be used by time counter)
272 */ 189 */
273static cycle_t igb_read_clock(const struct cyclecounter *tc) 190static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -275,11 +192,11 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
 	struct igb_adapter *adapter =
 			container_of(tc, struct igb_adapter, cycles);
 	struct e1000_hw *hw = &adapter->hw;
-	u64 stamp;
-
-	stamp = rd32(E1000_SYSTIML);
-	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
+	u64 stamp = 0;
+	int shift = 0;
 
+	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
+	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
 	return stamp;
 }
 
@@ -320,17 +237,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
 #endif
 
 /**
- * igb_desc_unused - calculate if we have unused descriptors
- **/
-static int igb_desc_unused(struct igb_ring *ring)
-{
-	if (ring->next_to_clean > ring->next_to_use)
-		return ring->next_to_clean - ring->next_to_use - 1;
-
-	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
-}
-
-/**
  * igb_init_module - Driver Registration Routine
  *
  * igb_init_module is the first routine called when the driver is
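The helper removed above computes how many descriptors are free in the circular ring, always leaving one slot unused so a full ring can be told apart from an empty one. A standalone sketch of the same arithmetic (illustrative names, not driver code):

#include <stdio.h>

/* Same arithmetic as the igb_desc_unused() helper removed above, on a
 * plain struct so it can run outside the kernel. */
struct ring {
    unsigned int count;          /* total descriptors in the ring */
    unsigned int next_to_use;    /* next entry software will fill */
    unsigned int next_to_clean;  /* next entry cleanup will reclaim */
};

static int desc_unused(const struct ring *r)
{
    if (r->next_to_clean > r->next_to_use)
        return r->next_to_clean - r->next_to_use - 1;
    return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
    /* 256-entry ring, producer at 250, cleaner at 10: 15 slots free;
     * one slot always stays empty so full and empty can be told apart. */
    struct ring r = { .count = 256, .next_to_use = 250, .next_to_clean = 10 };

    printf("unused descriptors = %d\n", desc_unused(&r));
    return 0;
}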
@@ -344,12 +250,9 @@ static int __init igb_init_module(void)
344 250
345 printk(KERN_INFO "%s\n", igb_copyright); 251 printk(KERN_INFO "%s\n", igb_copyright);
346 252
347 global_quad_port_a = 0;
348
349#ifdef CONFIG_IGB_DCA 253#ifdef CONFIG_IGB_DCA
350 dca_register_notify(&dca_notifier); 254 dca_register_notify(&dca_notifier);
351#endif 255#endif
352
353 ret = pci_register_driver(&igb_driver); 256 ret = pci_register_driver(&igb_driver);
354 return ret; 257 return ret;
355} 258}
@@ -382,8 +285,8 @@ module_exit(igb_exit_module);
382 **/ 285 **/
383static void igb_cache_ring_register(struct igb_adapter *adapter) 286static void igb_cache_ring_register(struct igb_adapter *adapter)
384{ 287{
385 int i; 288 int i = 0, j = 0;
386 unsigned int rbase_offset = adapter->vfs_allocated_count; 289 u32 rbase_offset = adapter->vfs_allocated_count;
387 290
388 switch (adapter->hw.mac.type) { 291 switch (adapter->hw.mac.type) {
389 case e1000_82576: 292 case e1000_82576:
@@ -392,23 +295,36 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
392 * In order to avoid collision we start at the first free queue 295 * In order to avoid collision we start at the first free queue
393 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
394 */ 297 */
395 for (i = 0; i < adapter->num_rx_queues; i++) 298 if (adapter->vfs_allocated_count) {
396 adapter->rx_ring[i].reg_idx = rbase_offset + 299 for (; i < adapter->rss_queues; i++)
397 Q_IDX_82576(i); 300 adapter->rx_ring[i].reg_idx = rbase_offset +
398 for (i = 0; i < adapter->num_tx_queues; i++) 301 Q_IDX_82576(i);
399 adapter->tx_ring[i].reg_idx = rbase_offset + 302 for (; j < adapter->rss_queues; j++)
400 Q_IDX_82576(i); 303 adapter->tx_ring[j].reg_idx = rbase_offset +
401 break; 304 Q_IDX_82576(j);
305 }
402 case e1000_82575: 306 case e1000_82575:
403 default: 307 default:
404 for (i = 0; i < adapter->num_rx_queues; i++) 308 for (; i < adapter->num_rx_queues; i++)
405 adapter->rx_ring[i].reg_idx = i; 309 adapter->rx_ring[i].reg_idx = rbase_offset + i;
406 for (i = 0; i < adapter->num_tx_queues; i++) 310 for (; j < adapter->num_tx_queues; j++)
407 adapter->tx_ring[i].reg_idx = i; 311 adapter->tx_ring[j].reg_idx = rbase_offset + j;
408 break; 312 break;
409 } 313 }
410} 314}
411 315
316static void igb_free_queues(struct igb_adapter *adapter)
317{
318 kfree(adapter->tx_ring);
319 kfree(adapter->rx_ring);
320
321 adapter->tx_ring = NULL;
322 adapter->rx_ring = NULL;
323
324 adapter->num_rx_queues = 0;
325 adapter->num_tx_queues = 0;
326}
327
412/** 328/**
413 * igb_alloc_queues - Allocate memory for all rings 329 * igb_alloc_queues - Allocate memory for all rings
414 * @adapter: board private structure to initialize 330 * @adapter: board private structure to initialize
@@ -423,59 +339,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
423 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 339 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
424 sizeof(struct igb_ring), GFP_KERNEL); 340 sizeof(struct igb_ring), GFP_KERNEL);
425 if (!adapter->tx_ring) 341 if (!adapter->tx_ring)
426 return -ENOMEM; 342 goto err;
427 343
428 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 344 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
429 sizeof(struct igb_ring), GFP_KERNEL); 345 sizeof(struct igb_ring), GFP_KERNEL);
430 if (!adapter->rx_ring) { 346 if (!adapter->rx_ring)
431 kfree(adapter->tx_ring); 347 goto err;
432 return -ENOMEM;
433 }
434
435 adapter->rx_ring->buddy = adapter->tx_ring;
436 348
437 for (i = 0; i < adapter->num_tx_queues; i++) { 349 for (i = 0; i < adapter->num_tx_queues; i++) {
438 struct igb_ring *ring = &(adapter->tx_ring[i]); 350 struct igb_ring *ring = &(adapter->tx_ring[i]);
439 ring->count = adapter->tx_ring_count; 351 ring->count = adapter->tx_ring_count;
440 ring->adapter = adapter;
441 ring->queue_index = i; 352 ring->queue_index = i;
353 ring->pdev = adapter->pdev;
354 ring->netdev = adapter->netdev;
355 /* For 82575, context index must be unique per ring. */
356 if (adapter->hw.mac.type == e1000_82575)
357 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
442 } 358 }
359
443 for (i = 0; i < adapter->num_rx_queues; i++) { 360 for (i = 0; i < adapter->num_rx_queues; i++) {
444 struct igb_ring *ring = &(adapter->rx_ring[i]); 361 struct igb_ring *ring = &(adapter->rx_ring[i]);
445 ring->count = adapter->rx_ring_count; 362 ring->count = adapter->rx_ring_count;
446 ring->adapter = adapter;
447 ring->queue_index = i; 363 ring->queue_index = i;
448 ring->itr_register = E1000_ITR; 364 ring->pdev = adapter->pdev;
449 365 ring->netdev = adapter->netdev;
450 /* set a default napi handler for each rx_ring */ 366 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
451 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 367 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
368 /* set flag indicating ring supports SCTP checksum offload */
369 if (adapter->hw.mac.type >= e1000_82576)
370 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
452 } 371 }
453 372
454 igb_cache_ring_register(adapter); 373 igb_cache_ring_register(adapter);
455 return 0;
456}
457
458static void igb_free_queues(struct igb_adapter *adapter)
459{
460 int i;
461 374
462 for (i = 0; i < adapter->num_rx_queues; i++) 375 return 0;
463 netif_napi_del(&adapter->rx_ring[i].napi);
464 376
465 adapter->num_rx_queues = 0; 377err:
466 adapter->num_tx_queues = 0; 378 igb_free_queues(adapter);
467 379
468 kfree(adapter->tx_ring); 380 return -ENOMEM;
469 kfree(adapter->rx_ring);
470} 381}
471 382
472#define IGB_N0_QUEUE -1 383#define IGB_N0_QUEUE -1
473static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 384static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
474 int tx_queue, int msix_vector)
475{ 385{
476 u32 msixbm = 0; 386 u32 msixbm = 0;
387 struct igb_adapter *adapter = q_vector->adapter;
477 struct e1000_hw *hw = &adapter->hw; 388 struct e1000_hw *hw = &adapter->hw;
478 u32 ivar, index; 389 u32 ivar, index;
390 int rx_queue = IGB_N0_QUEUE;
391 int tx_queue = IGB_N0_QUEUE;
392
393 if (q_vector->rx_ring)
394 rx_queue = q_vector->rx_ring->reg_idx;
395 if (q_vector->tx_ring)
396 tx_queue = q_vector->tx_ring->reg_idx;
479 397
480 switch (hw->mac.type) { 398 switch (hw->mac.type) {
481 case e1000_82575: 399 case e1000_82575:
@@ -483,16 +401,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 	   bitmask for the EICR/EIMS/EIMC registers. To assign one
 	   or more queues to a vector, we write the appropriate bits
 	   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE) {
+		if (rx_queue > IGB_N0_QUEUE)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-			adapter->rx_ring[rx_queue].eims_value = msixbm;
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
+		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-			adapter->tx_ring[tx_queue].eims_value =
-				E1000_EICR_TX_QUEUE0 << tx_queue;
-		}
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
 		/* 82576 uses a table-based method for assigning vectors.
@@ -500,35 +414,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		 a vector number along with a "valid" bit.  Sadly, the layout
 		 of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (rx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue & 0x1) {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			} else {
+			if (rx_queue < 8) {
 				/* vector goes into low byte of register */
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (tx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue & 0x1) {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			} else {
+			if (tx_queue < 8) {
 				/* vector goes into second byte of register */
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
 		BUG();
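With the new indexing above, an 82576 queue lands in IVAR0[queue & 0x7], and the byte within that register is picked by direction (rx vs. tx) and by whether the queue index is below 8, rather than by the old (queue >> 1) pairing. A standalone sketch of that byte placement (illustrative helper, not driver code):

#include <stdio.h>

#define IVAR_VALID 0x80u   /* plays the role of E1000_IVAR_VALID */

/* Mirrors the byte placement above: rx queues use byte 0 (queue < 8)
 * or byte 2 (queue >= 8); tx queues use byte 1 or byte 3. */
static unsigned int ivar_set(unsigned int ivar, int queue, int is_tx,
                             unsigned int vector)
{
    int shift = (is_tx ? 8 : 0) + (queue < 8 ? 0 : 16);

    ivar &= ~(0xFFu << shift);
    ivar |= (vector | IVAR_VALID) << shift;
    return ivar;
}

int main(void)
{
    /* rx queue 9 and tx queue 1 both map to IVAR0 index 1 (x & 0x7). */
    unsigned int ivar = 0;

    ivar = ivar_set(ivar, 9, 0, 2);  /* rx queue 9 -> vector 2, byte 2 */
    ivar = ivar_set(ivar, 1, 1, 3);  /* tx queue 1 -> vector 3, byte 1 */
    printf("IVAR0[1] = 0x%08x\n", ivar);
    return 0;
}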
@@ -549,43 +462,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
549 struct e1000_hw *hw = &adapter->hw; 462 struct e1000_hw *hw = &adapter->hw;
550 463
551 adapter->eims_enable_mask = 0; 464 adapter->eims_enable_mask = 0;
552 if (hw->mac.type == e1000_82576)
553 /* Turn on MSI-X capability first, or our settings
554 * won't stick. And it will take days to debug. */
555 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
556 E1000_GPIE_PBA | E1000_GPIE_EIAME |
557 E1000_GPIE_NSICR);
558
559 for (i = 0; i < adapter->num_tx_queues; i++) {
560 struct igb_ring *tx_ring = &adapter->tx_ring[i];
561 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
562 adapter->eims_enable_mask |= tx_ring->eims_value;
563 if (tx_ring->itr_val)
564 writel(tx_ring->itr_val,
565 hw->hw_addr + tx_ring->itr_register);
566 else
567 writel(1, hw->hw_addr + tx_ring->itr_register);
568 }
569
570 for (i = 0; i < adapter->num_rx_queues; i++) {
571 struct igb_ring *rx_ring = &adapter->rx_ring[i];
572 rx_ring->buddy = NULL;
573 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
574 adapter->eims_enable_mask |= rx_ring->eims_value;
575 if (rx_ring->itr_val)
576 writel(rx_ring->itr_val,
577 hw->hw_addr + rx_ring->itr_register);
578 else
579 writel(1, hw->hw_addr + rx_ring->itr_register);
580 }
581
582 465
583 /* set vector for other causes, i.e. link changes */ 466 /* set vector for other causes, i.e. link changes */
584 switch (hw->mac.type) { 467 switch (hw->mac.type) {
585 case e1000_82575: 468 case e1000_82575:
586 array_wr32(E1000_MSIXBM(0), vector++,
587 E1000_EIMS_OTHER);
588
589 tmp = rd32(E1000_CTRL_EXT); 469 tmp = rd32(E1000_CTRL_EXT);
590 /* enable MSI-X PBA support*/ 470 /* enable MSI-X PBA support*/
591 tmp |= E1000_CTRL_EXT_PBA_CLR; 471 tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -595,22 +475,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
595 tmp |= E1000_CTRL_EXT_IRCA; 475 tmp |= E1000_CTRL_EXT_IRCA;
596 476
597 wr32(E1000_CTRL_EXT, tmp); 477 wr32(E1000_CTRL_EXT, tmp);
598 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 478
479 /* enable msix_other interrupt */
480 array_wr32(E1000_MSIXBM(0), vector++,
481 E1000_EIMS_OTHER);
599 adapter->eims_other = E1000_EIMS_OTHER; 482 adapter->eims_other = E1000_EIMS_OTHER;
600 483
601 break; 484 break;
602 485
603 case e1000_82576: 486 case e1000_82576:
487 /* Turn on MSI-X capability first, or our settings
488 * won't stick. And it will take days to debug. */
489 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
490 E1000_GPIE_PBA | E1000_GPIE_EIAME |
491 E1000_GPIE_NSICR);
492
493 /* enable msix_other interrupt */
494 adapter->eims_other = 1 << vector;
604 tmp = (vector++ | E1000_IVAR_VALID) << 8; 495 tmp = (vector++ | E1000_IVAR_VALID) << 8;
605 wr32(E1000_IVAR_MISC, tmp);
606 496
607 adapter->eims_enable_mask = (1 << (vector)) - 1; 497 wr32(E1000_IVAR_MISC, tmp);
608 adapter->eims_other = 1 << (vector - 1);
609 break; 498 break;
610 default: 499 default:
611 /* do nothing, since nothing else supports MSI-X */ 500 /* do nothing, since nothing else supports MSI-X */
612 break; 501 break;
613 } /* switch (hw->mac.type) */ 502 } /* switch (hw->mac.type) */
503
504 adapter->eims_enable_mask |= adapter->eims_other;
505
506 for (i = 0; i < adapter->num_q_vectors; i++) {
507 struct igb_q_vector *q_vector = adapter->q_vector[i];
508 igb_assign_vector(q_vector, vector++);
509 adapter->eims_enable_mask |= q_vector->eims_value;
510 }
511
614 wrfl(); 512 wrfl();
615} 513}
616 514
@@ -623,43 +521,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
623static int igb_request_msix(struct igb_adapter *adapter) 521static int igb_request_msix(struct igb_adapter *adapter)
624{ 522{
625 struct net_device *netdev = adapter->netdev; 523 struct net_device *netdev = adapter->netdev;
524 struct e1000_hw *hw = &adapter->hw;
626 int i, err = 0, vector = 0; 525 int i, err = 0, vector = 0;
627 526
628 vector = 0; 527 err = request_irq(adapter->msix_entries[vector].vector,
629 528 &igb_msix_other, 0, netdev->name, adapter);
630 for (i = 0; i < adapter->num_tx_queues; i++) { 529 if (err)
631 struct igb_ring *ring = &(adapter->tx_ring[i]); 530 goto out;
632 sprintf(ring->name, "%s-tx-%d", netdev->name, i); 531 vector++;
633 err = request_irq(adapter->msix_entries[vector].vector, 532
634 &igb_msix_tx, 0, ring->name, 533 for (i = 0; i < adapter->num_q_vectors; i++) {
635 &(adapter->tx_ring[i])); 534 struct igb_q_vector *q_vector = adapter->q_vector[i];
636 if (err) 535
637 goto out; 536 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
638 ring->itr_register = E1000_EITR(0) + (vector << 2); 537
639 ring->itr_val = 976; /* ~4000 ints/sec */ 538 if (q_vector->rx_ring && q_vector->tx_ring)
640 vector++; 539 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
641 } 540 q_vector->rx_ring->queue_index);
642 for (i = 0; i < adapter->num_rx_queues; i++) { 541 else if (q_vector->tx_ring)
643 struct igb_ring *ring = &(adapter->rx_ring[i]); 542 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
644 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 543 q_vector->tx_ring->queue_index);
645 sprintf(ring->name, "%s-rx-%d", netdev->name, i); 544 else if (q_vector->rx_ring)
545 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
546 q_vector->rx_ring->queue_index);
646 else 547 else
647 memcpy(ring->name, netdev->name, IFNAMSIZ); 548 sprintf(q_vector->name, "%s-unused", netdev->name);
549
648 err = request_irq(adapter->msix_entries[vector].vector, 550 err = request_irq(adapter->msix_entries[vector].vector,
649 &igb_msix_rx, 0, ring->name, 551 &igb_msix_ring, 0, q_vector->name,
650 &(adapter->rx_ring[i])); 552 q_vector);
651 if (err) 553 if (err)
652 goto out; 554 goto out;
653 ring->itr_register = E1000_EITR(0) + (vector << 2);
654 ring->itr_val = adapter->itr;
655 vector++; 555 vector++;
656 } 556 }
657 557
658 err = request_irq(adapter->msix_entries[vector].vector,
659 &igb_msix_other, 0, netdev->name, netdev);
660 if (err)
661 goto out;
662
663 igb_configure_msix(adapter); 558 igb_configure_msix(adapter);
664 return 0; 559 return 0;
665out: 560out:
@@ -672,11 +567,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
672 pci_disable_msix(adapter->pdev); 567 pci_disable_msix(adapter->pdev);
673 kfree(adapter->msix_entries); 568 kfree(adapter->msix_entries);
674 adapter->msix_entries = NULL; 569 adapter->msix_entries = NULL;
675 } else if (adapter->flags & IGB_FLAG_HAS_MSI) 570 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
676 pci_disable_msi(adapter->pdev); 571 pci_disable_msi(adapter->pdev);
677 return; 572 }
678} 573}
679 574
575/**
576 * igb_free_q_vectors - Free memory allocated for interrupt vectors
577 * @adapter: board private structure to initialize
578 *
579 * This function frees the memory allocated to the q_vectors. In addition if
580 * NAPI is enabled it will delete any references to the NAPI struct prior
581 * to freeing the q_vector.
582 **/
583static void igb_free_q_vectors(struct igb_adapter *adapter)
584{
585 int v_idx;
586
587 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
588 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
589 adapter->q_vector[v_idx] = NULL;
590 netif_napi_del(&q_vector->napi);
591 kfree(q_vector);
592 }
593 adapter->num_q_vectors = 0;
594}
595
596/**
597 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
598 *
599 * This function resets the device so that it has 0 rx queues, tx queues, and
600 * MSI-X interrupts allocated.
601 */
602static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
603{
604 igb_free_queues(adapter);
605 igb_free_q_vectors(adapter);
606 igb_reset_interrupt_capability(adapter);
607}
680 608
681/** 609/**
682 * igb_set_interrupt_capability - set MSI or MSI-X if supported 610 * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -690,11 +618,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
-	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
-	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;
+
+	/* start with one vector for every rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if tx handler is seperate add 1 for every tx queue */
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;
 
-	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
 	if (!adapter->msix_entries)
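The vector budget computed above is one MSI-X vector per rx queue, one more per tx queue only when tx and rx are not paired onto shared vectors (IGB_FLAG_QUEUE_PAIRS), plus a final vector for link and other causes; num_q_vectors excludes that last one. A standalone sketch of the same accounting (illustrative, not driver code):

#include <stdio.h>

/* Same accounting as above: one vector per rx queue, one more per tx
 * queue unless tx and rx are paired on shared vectors, plus one vector
 * for link status / other causes. */
static int igb_numvecs(int rss_queues, int queue_pairs)
{
    int numvecs = rss_queues;           /* rx queues */

    if (!queue_pairs)
        numvecs += rss_queues;          /* separate tx vectors */

    return numvecs + 1;                 /* link status vector */
}

int main(void)
{
    printf("4 queues, paired tx/rx:   %d MSI-X vectors\n", igb_numvecs(4, 1));
    printf("4 queues, separate tx/rx: %d MSI-X vectors\n", igb_numvecs(4, 0));
    return 0;
}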
@@ -728,8 +666,12 @@ msi_only:
728 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 666 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
729 } 667 }
730#endif 668#endif
669 adapter->vfs_allocated_count = 0;
670 adapter->rss_queues = 1;
671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
731 adapter->num_rx_queues = 1; 672 adapter->num_rx_queues = 1;
732 adapter->num_tx_queues = 1; 673 adapter->num_tx_queues = 1;
674 adapter->num_q_vectors = 1;
733 if (!pci_enable_msi(adapter->pdev)) 675 if (!pci_enable_msi(adapter->pdev))
734 adapter->flags |= IGB_FLAG_HAS_MSI; 676 adapter->flags |= IGB_FLAG_HAS_MSI;
735out: 677out:
@@ -739,6 +681,143 @@ out:
739} 681}
740 682
741/** 683/**
684 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
685 * @adapter: board private structure to initialize
686 *
687 * We allocate one q_vector per queue interrupt. If allocation fails we
688 * return -ENOMEM.
689 **/
690static int igb_alloc_q_vectors(struct igb_adapter *adapter)
691{
692 struct igb_q_vector *q_vector;
693 struct e1000_hw *hw = &adapter->hw;
694 int v_idx;
695
696 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
697 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
698 if (!q_vector)
699 goto err_out;
700 q_vector->adapter = adapter;
701 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
702 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
703 q_vector->itr_val = IGB_START_ITR;
704 q_vector->set_itr = 1;
705 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
706 adapter->q_vector[v_idx] = q_vector;
707 }
708 return 0;
709
710err_out:
711 while (v_idx) {
712 v_idx--;
713 q_vector = adapter->q_vector[v_idx];
714 netif_napi_del(&q_vector->napi);
715 kfree(q_vector);
716 adapter->q_vector[v_idx] = NULL;
717 }
718 return -ENOMEM;
719}
720
721static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
722 int ring_idx, int v_idx)
723{
724 struct igb_q_vector *q_vector;
725
726 q_vector = adapter->q_vector[v_idx];
727 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
728 q_vector->rx_ring->q_vector = q_vector;
729 q_vector->itr_val = adapter->rx_itr_setting;
730 if (q_vector->itr_val && q_vector->itr_val <= 3)
731 q_vector->itr_val = IGB_START_ITR;
732}
733
734static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
735 int ring_idx, int v_idx)
736{
737 struct igb_q_vector *q_vector;
738
739 q_vector = adapter->q_vector[v_idx];
740 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
741 q_vector->tx_ring->q_vector = q_vector;
742 q_vector->itr_val = adapter->tx_itr_setting;
743 if (q_vector->itr_val && q_vector->itr_val <= 3)
744 q_vector->itr_val = IGB_START_ITR;
745}
746
747/**
748 * igb_map_ring_to_vector - maps allocated queues to vectors
749 *
750 * This function maps the recently allocated queues to vectors.
751 **/
752static int igb_map_ring_to_vector(struct igb_adapter *adapter)
753{
754 int i;
755 int v_idx = 0;
756
757 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
758 (adapter->num_q_vectors < adapter->num_tx_queues))
759 return -ENOMEM;
760
761 if (adapter->num_q_vectors >=
762 (adapter->num_rx_queues + adapter->num_tx_queues)) {
763 for (i = 0; i < adapter->num_rx_queues; i++)
764 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
765 for (i = 0; i < adapter->num_tx_queues; i++)
766 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
767 } else {
768 for (i = 0; i < adapter->num_rx_queues; i++) {
769 if (i < adapter->num_tx_queues)
770 igb_map_tx_ring_to_vector(adapter, i, v_idx);
771 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
772 }
773 for (; i < adapter->num_tx_queues; i++)
774 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
775 }
776 return 0;
777}
778
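igb_map_ring_to_vector() above applies a simple policy: if there are at least as many q_vectors as rings, each ring gets its own vector; otherwise the tx ring with a given index shares the vector of the rx ring with the same index, and any leftover tx rings take the remaining vectors. A standalone sketch of that policy (illustrative, not driver code):

#include <stdio.h>

/* Models the assignment policy of igb_map_ring_to_vector() above. */
static void map_rings(int num_vectors, int rx_rings, int tx_rings)
{
    int i, v_idx = 0;

    if (num_vectors >= rx_rings + tx_rings) {
        /* enough vectors: every ring gets its own */
        for (i = 0; i < rx_rings; i++)
            printf("vector %d <- rx ring %d\n", v_idx++, i);
        for (i = 0; i < tx_rings; i++)
            printf("vector %d <- tx ring %d\n", v_idx++, i);
    } else {
        /* short on vectors: tx ring i shares the vector of rx ring i */
        for (i = 0; i < rx_rings; i++) {
            if (i < tx_rings)
                printf("vector %d <- tx ring %d\n", v_idx, i);
            printf("vector %d <- rx ring %d\n", v_idx++, i);
        }
        for (; i < tx_rings; i++)
            printf("vector %d <- tx ring %d\n", v_idx++, i);
    }
}

int main(void)
{
    map_rings(4, 4, 4);  /* paired: each vector carries one rx ring and one tx ring */
    map_rings(8, 4, 4);  /* unpaired: eight vectors, one ring each */
    return 0;
}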
779/**
780 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
781 *
782 * This function initializes the interrupts and allocates all of the queues.
783 **/
784static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
785{
786 struct pci_dev *pdev = adapter->pdev;
787 int err;
788
789 igb_set_interrupt_capability(adapter);
790
791 err = igb_alloc_q_vectors(adapter);
792 if (err) {
793 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
794 goto err_alloc_q_vectors;
795 }
796
797 err = igb_alloc_queues(adapter);
798 if (err) {
799 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
800 goto err_alloc_queues;
801 }
802
803 err = igb_map_ring_to_vector(adapter);
804 if (err) {
805 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
806 goto err_map_queues;
807 }
808
809
810 return 0;
811err_map_queues:
812 igb_free_queues(adapter);
813err_alloc_queues:
814 igb_free_q_vectors(adapter);
815err_alloc_q_vectors:
816 igb_reset_interrupt_capability(adapter);
817 return err;
818}
819
820/**
742 * igb_request_irq - initialize interrupts 821 * igb_request_irq - initialize interrupts
743 * 822 *
744 * Attempts to configure interrupts using the best available 823 * Attempts to configure interrupts using the best available
@@ -747,6 +826,7 @@ out:
747static int igb_request_irq(struct igb_adapter *adapter) 826static int igb_request_irq(struct igb_adapter *adapter)
748{ 827{
749 struct net_device *netdev = adapter->netdev; 828 struct net_device *netdev = adapter->netdev;
829 struct pci_dev *pdev = adapter->pdev;
750 struct e1000_hw *hw = &adapter->hw; 830 struct e1000_hw *hw = &adapter->hw;
751 int err = 0; 831 int err = 0;
752 832
@@ -755,18 +835,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
755 if (!err) 835 if (!err)
756 goto request_done; 836 goto request_done;
757 /* fall back to MSI */ 837 /* fall back to MSI */
758 igb_reset_interrupt_capability(adapter); 838 igb_clear_interrupt_scheme(adapter);
759 if (!pci_enable_msi(adapter->pdev)) 839 if (!pci_enable_msi(adapter->pdev))
760 adapter->flags |= IGB_FLAG_HAS_MSI; 840 adapter->flags |= IGB_FLAG_HAS_MSI;
761 igb_free_all_tx_resources(adapter); 841 igb_free_all_tx_resources(adapter);
762 igb_free_all_rx_resources(adapter); 842 igb_free_all_rx_resources(adapter);
843 adapter->num_tx_queues = 1;
763 adapter->num_rx_queues = 1; 844 adapter->num_rx_queues = 1;
764 igb_alloc_queues(adapter); 845 adapter->num_q_vectors = 1;
846 err = igb_alloc_q_vectors(adapter);
847 if (err) {
848 dev_err(&pdev->dev,
849 "Unable to allocate memory for vectors\n");
850 goto request_done;
851 }
852 err = igb_alloc_queues(adapter);
853 if (err) {
854 dev_err(&pdev->dev,
855 "Unable to allocate memory for queues\n");
856 igb_free_q_vectors(adapter);
857 goto request_done;
858 }
859 igb_setup_all_tx_resources(adapter);
860 igb_setup_all_rx_resources(adapter);
765 } else { 861 } else {
766 switch (hw->mac.type) { 862 switch (hw->mac.type) {
767 case e1000_82575: 863 case e1000_82575:
768 wr32(E1000_MSIXBM(0), 864 wr32(E1000_MSIXBM(0),
769 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); 865 (E1000_EICR_RX_QUEUE0 |
866 E1000_EICR_TX_QUEUE0 |
867 E1000_EIMS_OTHER));
770 break; 868 break;
771 case e1000_82576: 869 case e1000_82576:
772 wr32(E1000_IVAR0, E1000_IVAR_VALID); 870 wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -778,16 +876,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
778 876
779 if (adapter->flags & IGB_FLAG_HAS_MSI) { 877 if (adapter->flags & IGB_FLAG_HAS_MSI) {
780 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 878 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
781 netdev->name, netdev); 879 netdev->name, adapter);
782 if (!err) 880 if (!err)
783 goto request_done; 881 goto request_done;
882
784 /* fall back to legacy interrupts */ 883 /* fall back to legacy interrupts */
785 igb_reset_interrupt_capability(adapter); 884 igb_reset_interrupt_capability(adapter);
786 adapter->flags &= ~IGB_FLAG_HAS_MSI; 885 adapter->flags &= ~IGB_FLAG_HAS_MSI;
787 } 886 }
788 887
789 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 888 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
790 netdev->name, netdev); 889 netdev->name, adapter);
791 890
792 if (err) 891 if (err)
793 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 892 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -799,23 +898,19 @@ request_done:
799 898
800static void igb_free_irq(struct igb_adapter *adapter) 899static void igb_free_irq(struct igb_adapter *adapter)
801{ 900{
802 struct net_device *netdev = adapter->netdev;
803
804 if (adapter->msix_entries) { 901 if (adapter->msix_entries) {
805 int vector = 0, i; 902 int vector = 0, i;
806 903
807 for (i = 0; i < adapter->num_tx_queues; i++) 904 free_irq(adapter->msix_entries[vector++].vector, adapter);
808 free_irq(adapter->msix_entries[vector++].vector,
809 &(adapter->tx_ring[i]));
810 for (i = 0; i < adapter->num_rx_queues; i++)
811 free_irq(adapter->msix_entries[vector++].vector,
812 &(adapter->rx_ring[i]));
813 905
814 free_irq(adapter->msix_entries[vector++].vector, netdev); 906 for (i = 0; i < adapter->num_q_vectors; i++) {
815 return; 907 struct igb_q_vector *q_vector = adapter->q_vector[i];
908 free_irq(adapter->msix_entries[vector++].vector,
909 q_vector);
910 }
911 } else {
912 free_irq(adapter->pdev->irq, adapter);
816 } 913 }
817
818 free_irq(adapter->pdev->irq, netdev);
819} 914}
820 915
821/** 916/**
@@ -826,6 +921,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
826{ 921{
827 struct e1000_hw *hw = &adapter->hw; 922 struct e1000_hw *hw = &adapter->hw;
828 923
924 /*
925 * we need to be careful when disabling interrupts. The VFs are also
926 * mapped into these registers and so clearing the bits can cause
927 * issues on the VF drivers so we only need to clear what we set
928 */
829 if (adapter->msix_entries) { 929 if (adapter->msix_entries) {
830 u32 regval = rd32(E1000_EIAM); 930 u32 regval = rd32(E1000_EIAM);
831 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 931 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -849,15 +949,17 @@ static void igb_irq_enable(struct igb_adapter *adapter)
849 struct e1000_hw *hw = &adapter->hw; 949 struct e1000_hw *hw = &adapter->hw;
850 950
851 if (adapter->msix_entries) { 951 if (adapter->msix_entries) {
952 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
852 u32 regval = rd32(E1000_EIAC); 953 u32 regval = rd32(E1000_EIAC);
853 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 954 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
854 regval = rd32(E1000_EIAM); 955 regval = rd32(E1000_EIAM);
855 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 956 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
856 wr32(E1000_EIMS, adapter->eims_enable_mask); 957 wr32(E1000_EIMS, adapter->eims_enable_mask);
857 if (adapter->vfs_allocated_count) 958 if (adapter->vfs_allocated_count) {
858 wr32(E1000_MBVFIMR, 0xFF); 959 wr32(E1000_MBVFIMR, 0xFF);
859 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 960 ims |= E1000_IMS_VMMB;
860 E1000_IMS_DOUTSYNC)); 961 }
962 wr32(E1000_IMS, ims);
861 } else { 963 } else {
862 wr32(E1000_IMS, IMS_ENABLE_MASK); 964 wr32(E1000_IMS, IMS_ENABLE_MASK);
863 wr32(E1000_IAM, IMS_ENABLE_MASK); 965 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -866,24 +968,23 @@ static void igb_irq_enable(struct igb_adapter *adapter)
866 968
867static void igb_update_mng_vlan(struct igb_adapter *adapter) 969static void igb_update_mng_vlan(struct igb_adapter *adapter)
868{ 970{
869 struct net_device *netdev = adapter->netdev; 971 struct e1000_hw *hw = &adapter->hw;
870 u16 vid = adapter->hw.mng_cookie.vlan_id; 972 u16 vid = adapter->hw.mng_cookie.vlan_id;
871 u16 old_vid = adapter->mng_vlan_id; 973 u16 old_vid = adapter->mng_vlan_id;
872 if (adapter->vlgrp) {
873 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
874 if (adapter->hw.mng_cookie.status &
875 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
876 igb_vlan_rx_add_vid(netdev, vid);
877 adapter->mng_vlan_id = vid;
878 } else
879 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
880 974
881 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 975 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
882 (vid != old_vid) && 976 /* add VID to filter table */
883 !vlan_group_get_device(adapter->vlgrp, old_vid)) 977 igb_vfta_set(hw, vid, true);
884 igb_vlan_rx_kill_vid(netdev, old_vid); 978 adapter->mng_vlan_id = vid;
885 } else 979 } else {
886 adapter->mng_vlan_id = vid; 980 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
981 }
982
983 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
984 (vid != old_vid) &&
985 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
986 /* remove VID from filter table */
987 igb_vfta_set(hw, old_vid, false);
887 } 988 }
888} 989}
889 990
@@ -907,7 +1008,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
907 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1008 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
908} 1009}
909 1010
910
911/** 1011/**
912 * igb_get_hw_control - get control of the h/w from f/w 1012 * igb_get_hw_control - get control of the h/w from f/w
913 * @adapter: address of board private structure 1013 * @adapter: address of board private structure
@@ -942,8 +1042,11 @@ static void igb_configure(struct igb_adapter *adapter)
942 1042
943 igb_restore_vlan(adapter); 1043 igb_restore_vlan(adapter);
944 1044
945 igb_configure_tx(adapter); 1045 igb_setup_tctl(adapter);
1046 igb_setup_mrqc(adapter);
946 igb_setup_rctl(adapter); 1047 igb_setup_rctl(adapter);
1048
1049 igb_configure_tx(adapter);
947 igb_configure_rx(adapter); 1050 igb_configure_rx(adapter);
948 1051
949 igb_rx_fifo_flush_82575(&adapter->hw); 1052 igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1068,6 @@ static void igb_configure(struct igb_adapter *adapter)
965 * igb_up - Open the interface and prepare it to handle traffic 1068 * igb_up - Open the interface and prepare it to handle traffic
966 * @adapter: board private structure 1069 * @adapter: board private structure
967 **/ 1070 **/
968
969int igb_up(struct igb_adapter *adapter) 1071int igb_up(struct igb_adapter *adapter)
970{ 1072{
971 struct e1000_hw *hw = &adapter->hw; 1073 struct e1000_hw *hw = &adapter->hw;
@@ -976,30 +1078,37 @@ int igb_up(struct igb_adapter *adapter)
976 1078
977 clear_bit(__IGB_DOWN, &adapter->state); 1079 clear_bit(__IGB_DOWN, &adapter->state);
978 1080
979 for (i = 0; i < adapter->num_rx_queues; i++) 1081 for (i = 0; i < adapter->num_q_vectors; i++) {
980 napi_enable(&adapter->rx_ring[i].napi); 1082 struct igb_q_vector *q_vector = adapter->q_vector[i];
1083 napi_enable(&q_vector->napi);
1084 }
981 if (adapter->msix_entries) 1085 if (adapter->msix_entries)
982 igb_configure_msix(adapter); 1086 igb_configure_msix(adapter);
983 1087
984 igb_vmm_control(adapter);
985 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
986 igb_set_vmolr(hw, adapter->vfs_allocated_count);
987
988 /* Clear any pending interrupts. */ 1088 /* Clear any pending interrupts. */
989 rd32(E1000_ICR); 1089 rd32(E1000_ICR);
990 igb_irq_enable(adapter); 1090 igb_irq_enable(adapter);
991 1091
1092 /* notify VFs that reset has been completed */
1093 if (adapter->vfs_allocated_count) {
1094 u32 reg_data = rd32(E1000_CTRL_EXT);
1095 reg_data |= E1000_CTRL_EXT_PFRSTD;
1096 wr32(E1000_CTRL_EXT, reg_data);
1097 }
1098
992 netif_tx_start_all_queues(adapter->netdev); 1099 netif_tx_start_all_queues(adapter->netdev);
993 1100
994 /* Fire a link change interrupt to start the watchdog. */ 1101 /* start the watchdog. */
995 wr32(E1000_ICS, E1000_ICS_LSC); 1102 hw->mac.get_link_status = 1;
1103 schedule_work(&adapter->watchdog_task);
1104
996 return 0; 1105 return 0;
997} 1106}
998 1107
999void igb_down(struct igb_adapter *adapter) 1108void igb_down(struct igb_adapter *adapter)
1000{ 1109{
1001 struct e1000_hw *hw = &adapter->hw;
1002 struct net_device *netdev = adapter->netdev; 1110 struct net_device *netdev = adapter->netdev;
1111 struct e1000_hw *hw = &adapter->hw;
1003 u32 tctl, rctl; 1112 u32 tctl, rctl;
1004 int i; 1113 int i;
1005 1114
@@ -1022,8 +1131,10 @@ void igb_down(struct igb_adapter *adapter)
1022 wrfl(); 1131 wrfl();
1023 msleep(10); 1132 msleep(10);
1024 1133
1025 for (i = 0; i < adapter->num_rx_queues; i++) 1134 for (i = 0; i < adapter->num_q_vectors; i++) {
1026 napi_disable(&adapter->rx_ring[i].napi); 1135 struct igb_q_vector *q_vector = adapter->q_vector[i];
1136 napi_disable(&q_vector->napi);
1137 }
1027 1138
1028 igb_irq_disable(adapter); 1139 igb_irq_disable(adapter);
1029 1140
@@ -1062,6 +1173,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1062 1173
1063void igb_reset(struct igb_adapter *adapter) 1174void igb_reset(struct igb_adapter *adapter)
1064{ 1175{
1176 struct pci_dev *pdev = adapter->pdev;
1065 struct e1000_hw *hw = &adapter->hw; 1177 struct e1000_hw *hw = &adapter->hw;
1066 struct e1000_mac_info *mac = &hw->mac; 1178 struct e1000_mac_info *mac = &hw->mac;
1067 struct e1000_fc_info *fc = &hw->fc; 1179 struct e1000_fc_info *fc = &hw->fc;
@@ -1073,7 +1185,8 @@ void igb_reset(struct igb_adapter *adapter)
1073 */ 1185 */
1074 switch (mac->type) { 1186 switch (mac->type) {
1075 case e1000_82576: 1187 case e1000_82576:
1076 pba = E1000_PBA_64K; 1188 pba = rd32(E1000_RXPBS);
1189 pba &= E1000_RXPBS_SIZE_MASK_82576;
1077 break; 1190 break;
1078 case e1000_82575: 1191 case e1000_82575:
1079 default: 1192 default:
@@ -1148,10 +1261,10 @@ void igb_reset(struct igb_adapter *adapter)
1148 if (adapter->vfs_allocated_count) { 1261 if (adapter->vfs_allocated_count) {
1149 int i; 1262 int i;
1150 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1263 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1151 adapter->vf_data[i].clear_to_send = false; 1264 adapter->vf_data[i].flags = 0;
1152 1265
1153 /* ping all the active vfs to let them know we are going down */ 1266 /* ping all the active vfs to let them know we are going down */
1154 igb_ping_all_vfs(adapter); 1267 igb_ping_all_vfs(adapter);
1155 1268
1156 /* disable transmits and receives */ 1269 /* disable transmits and receives */
1157 wr32(E1000_VFRE, 0); 1270 wr32(E1000_VFRE, 0);
@@ -1159,23 +1272,23 @@ void igb_reset(struct igb_adapter *adapter)
1159 } 1272 }
1160 1273
1161 /* Allow time for pending master requests to run */ 1274 /* Allow time for pending master requests to run */
1162 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1275 hw->mac.ops.reset_hw(hw);
1163 wr32(E1000_WUC, 0); 1276 wr32(E1000_WUC, 0);
1164 1277
1165 if (adapter->hw.mac.ops.init_hw(&adapter->hw)) 1278 if (hw->mac.ops.init_hw(hw))
1166 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 1279 dev_err(&pdev->dev, "Hardware Error\n");
1167 1280
1168 igb_update_mng_vlan(adapter); 1281 igb_update_mng_vlan(adapter);
1169 1282
1170 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1283 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1171 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1284 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1172 1285
1173 igb_reset_adaptive(&adapter->hw); 1286 igb_reset_adaptive(hw);
1174 igb_get_phy_info(&adapter->hw); 1287 igb_get_phy_info(hw);
1175} 1288}
1176 1289
1177static const struct net_device_ops igb_netdev_ops = { 1290static const struct net_device_ops igb_netdev_ops = {
1178 .ndo_open = igb_open, 1291 .ndo_open = igb_open,
1179 .ndo_stop = igb_close, 1292 .ndo_stop = igb_close,
1180 .ndo_start_xmit = igb_xmit_frame_adv, 1293 .ndo_start_xmit = igb_xmit_frame_adv,
1181 .ndo_get_stats = igb_get_stats, 1294 .ndo_get_stats = igb_get_stats,
@@ -1211,10 +1324,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1211 struct net_device *netdev; 1324 struct net_device *netdev;
1212 struct igb_adapter *adapter; 1325 struct igb_adapter *adapter;
1213 struct e1000_hw *hw; 1326 struct e1000_hw *hw;
1327 u16 eeprom_data = 0;
1328 static int global_quad_port_a; /* global quad port a indication */
1214 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1329 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1215 unsigned long mmio_start, mmio_len; 1330 unsigned long mmio_start, mmio_len;
1216 int err, pci_using_dac; 1331 int err, pci_using_dac;
1217 u16 eeprom_data = 0;
1218 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1332 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1219 u32 part_num; 1333 u32 part_num;
1220 1334
@@ -1291,8 +1405,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1291 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1405 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1292 hw->subsystem_device_id = pdev->subsystem_device; 1406 hw->subsystem_device_id = pdev->subsystem_device;
1293 1407
1294 /* setup the private structure */
1295 hw->back = adapter;
1296 /* Copy the default MAC, PHY and NVM function pointers */ 1408 /* Copy the default MAC, PHY and NVM function pointers */
1297 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 1409 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1298 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 1410 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1302,46 +1414,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1302 if (err) 1414 if (err)
1303 goto err_sw_init; 1415 goto err_sw_init;
1304 1416
1305#ifdef CONFIG_PCI_IOV
1306 /* since iov functionality isn't critical to base device function we
1307 * can accept failure. If it fails we don't allow iov to be enabled */
1308 if (hw->mac.type == e1000_82576) {
1309 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1310 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1311 int i;
1312 unsigned char mac_addr[ETH_ALEN];
1313
1314 if (num_vfs) {
1315 adapter->vf_data = kcalloc(num_vfs,
1316 sizeof(struct vf_data_storage),
1317 GFP_KERNEL);
1318 if (!adapter->vf_data) {
1319 dev_err(&pdev->dev,
1320 "Could not allocate VF private data - "
1321 "IOV enable failed\n");
1322 } else {
1323 err = pci_enable_sriov(pdev, num_vfs);
1324 if (!err) {
1325 adapter->vfs_allocated_count = num_vfs;
1326 dev_info(&pdev->dev,
1327 "%d vfs allocated\n",
1328 num_vfs);
1329 for (i = 0;
1330 i < adapter->vfs_allocated_count;
1331 i++) {
1332 random_ether_addr(mac_addr);
1333 igb_set_vf_mac(adapter, i,
1334 mac_addr);
1335 }
1336 } else {
1337 kfree(adapter->vf_data);
1338 adapter->vf_data = NULL;
1339 }
1340 }
1341 }
1342 }
1343
1344#endif
1345 /* setup the private structure */ 1417 /* setup the private structure */
1346 err = igb_sw_init(adapter); 1418 err = igb_sw_init(adapter);
1347 if (err) 1419 if (err)
@@ -1349,16 +1421,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1349 1421
1350 igb_get_bus_info_pcie(hw); 1422 igb_get_bus_info_pcie(hw);
1351 1423
1352 /* set flags */
1353 switch (hw->mac.type) {
1354 case e1000_82575:
1355 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1356 break;
1357 case e1000_82576:
1358 default:
1359 break;
1360 }
1361
1362 hw->phy.autoneg_wait_to_complete = false; 1424 hw->phy.autoneg_wait_to_complete = false;
1363 hw->mac.adaptive_ifs = true; 1425 hw->mac.adaptive_ifs = true;
1364 1426
@@ -1382,7 +1444,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1382 netdev->features |= NETIF_F_IPV6_CSUM; 1444 netdev->features |= NETIF_F_IPV6_CSUM;
1383 netdev->features |= NETIF_F_TSO; 1445 netdev->features |= NETIF_F_TSO;
1384 netdev->features |= NETIF_F_TSO6; 1446 netdev->features |= NETIF_F_TSO6;
1385
1386 netdev->features |= NETIF_F_GRO; 1447 netdev->features |= NETIF_F_GRO;
1387 1448
1388 netdev->vlan_features |= NETIF_F_TSO; 1449 netdev->vlan_features |= NETIF_F_TSO;
@@ -1394,10 +1455,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1394 if (pci_using_dac) 1455 if (pci_using_dac)
1395 netdev->features |= NETIF_F_HIGHDMA; 1456 netdev->features |= NETIF_F_HIGHDMA;
1396 1457
1397 if (adapter->hw.mac.type == e1000_82576) 1458 if (hw->mac.type >= e1000_82576)
1398 netdev->features |= NETIF_F_SCTP_CSUM; 1459 netdev->features |= NETIF_F_SCTP_CSUM;
1399 1460
1400 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1461 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1401 1462
1402 /* before reading the NVM, reset the controller to put the device in a 1463 /* before reading the NVM, reset the controller to put the device in a
1403 * known good starting state */ 1464 * known good starting state */
@@ -1439,9 +1500,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1439 hw->fc.requested_mode = e1000_fc_default; 1500 hw->fc.requested_mode = e1000_fc_default;
1440 hw->fc.current_mode = e1000_fc_default; 1501 hw->fc.current_mode = e1000_fc_default;
1441 1502
1442 adapter->itr_setting = IGB_DEFAULT_ITR;
1443 adapter->itr = IGB_START_ITR;
1444
1445 igb_validate_mdi_setting(hw); 1503 igb_validate_mdi_setting(hw);
1446 1504
1447 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 1505 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1508,66 +1566,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1508 dev_info(&pdev->dev, "DCA enabled\n"); 1566 dev_info(&pdev->dev, "DCA enabled\n");
1509 igb_setup_dca(adapter); 1567 igb_setup_dca(adapter);
1510 } 1568 }
1511#endif
1512
1513 /*
1514 * Initialize hardware timer: we keep it running just in case
1515 * that some program needs it later on.
1516 */
1517 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1518 adapter->cycles.read = igb_read_clock;
1519 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1520 adapter->cycles.mult = 1;
1521 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1522 wr32(E1000_TIMINCA,
1523 (1<<24) |
1524 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1525#if 0
1526 /*
1527 * Avoid rollover while we initialize by resetting the time counter.
1528 */
1529 wr32(E1000_SYSTIML, 0x00000000);
1530 wr32(E1000_SYSTIMH, 0x00000000);
1531#else
1532 /*
1533 * Set registers so that rollover occurs soon to test this.
1534 */
1535 wr32(E1000_SYSTIML, 0x00000000);
1536 wr32(E1000_SYSTIMH, 0xFF800000);
1537#endif
1538 wrfl();
1539 timecounter_init(&adapter->clock,
1540 &adapter->cycles,
1541 ktime_to_ns(ktime_get_real()));
1542
1543 /*
1544 * Synchronize our NIC clock against system wall clock. NIC
1545 * time stamp reading requires ~3us per sample, each sample
1546 * was pretty stable even under load => only require 10
1547 * samples for each offset comparison.
1548 */
1549 memset(&adapter->compare, 0, sizeof(adapter->compare));
1550 adapter->compare.source = &adapter->clock;
1551 adapter->compare.target = ktime_get_real;
1552 adapter->compare.num_samples = 10;
1553 timecompare_update(&adapter->compare, 0);
1554 1569
1555#ifdef DEBUG
1556 {
1557 char buffer[160];
1558 printk(KERN_DEBUG
1559 "igb: %s: hw %p initialized timer\n",
1560 igb_get_time_str(adapter, buffer),
1561 &adapter->hw);
1562 }
1563#endif 1570#endif
1564
1565 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1571 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1566 /* print bus type/speed/width info */ 1572 /* print bus type/speed/width info */
1567 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1573 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1568 netdev->name, 1574 netdev->name,
1569 ((hw->bus.speed == e1000_bus_speed_2500) 1575 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1570 ? "2.5Gb/s" : "unknown"), 1576 "unknown"),
1571 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1577 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1572 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1578 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1573 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 1579 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1594,15 +1600,14 @@ err_eeprom:
1594 1600
1595 if (hw->flash_address) 1601 if (hw->flash_address)
1596 iounmap(hw->flash_address); 1602 iounmap(hw->flash_address);
1597
1598 igb_free_queues(adapter);
1599err_sw_init: 1603err_sw_init:
1604 igb_clear_interrupt_scheme(adapter);
1600 iounmap(hw->hw_addr); 1605 iounmap(hw->hw_addr);
1601err_ioremap: 1606err_ioremap:
1602 free_netdev(netdev); 1607 free_netdev(netdev);
1603err_alloc_etherdev: 1608err_alloc_etherdev:
1604 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1609 pci_release_selected_regions(pdev,
1605 IORESOURCE_MEM)); 1610 pci_select_bars(pdev, IORESOURCE_MEM));
1606err_pci_reg: 1611err_pci_reg:
1607err_dma: 1612err_dma:
1608 pci_disable_device(pdev); 1613 pci_disable_device(pdev);
@@ -1647,12 +1652,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1647 1652
1648 unregister_netdev(netdev); 1653 unregister_netdev(netdev);
1649 1654
1650 if (!igb_check_reset_block(&adapter->hw)) 1655 if (!igb_check_reset_block(hw))
1651 igb_reset_phy(&adapter->hw); 1656 igb_reset_phy(hw);
1652
1653 igb_reset_interrupt_capability(adapter);
1654 1657
1655 igb_free_queues(adapter); 1658 igb_clear_interrupt_scheme(adapter);
1656 1659
1657#ifdef CONFIG_PCI_IOV 1660#ifdef CONFIG_PCI_IOV
1658 /* reclaim resources allocated to VFs */ 1661 /* reclaim resources allocated to VFs */
@@ -1668,11 +1671,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1668 dev_info(&pdev->dev, "IOV Disabled\n"); 1671 dev_info(&pdev->dev, "IOV Disabled\n");
1669 } 1672 }
1670#endif 1673#endif
1674
1671 iounmap(hw->hw_addr); 1675 iounmap(hw->hw_addr);
1672 if (hw->flash_address) 1676 if (hw->flash_address)
1673 iounmap(hw->flash_address); 1677 iounmap(hw->flash_address);
1674 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1678 pci_release_selected_regions(pdev,
1675 IORESOURCE_MEM)); 1679 pci_select_bars(pdev, IORESOURCE_MEM));
1676 1680
1677 free_netdev(netdev); 1681 free_netdev(netdev);
1678 1682
@@ -1682,6 +1686,118 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682} 1686}
1683 1687
1684/** 1688/**
1689 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1690 * @adapter: board private structure to initialize
1691 *
1692 * This function initializes the vf specific data storage and then attempts to
1693 * allocate the VFs. The reason for this ordering is that it is much
1694 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1695 * the memory for the VFs.
1696 **/
1697static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1698{
1699#ifdef CONFIG_PCI_IOV
1700 struct pci_dev *pdev = adapter->pdev;
1701
1702 if (adapter->vfs_allocated_count > 7)
1703 adapter->vfs_allocated_count = 7;
1704
1705 if (adapter->vfs_allocated_count) {
1706 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1707 sizeof(struct vf_data_storage),
1708 GFP_KERNEL);
1709 /* if allocation failed then we do not support SR-IOV */
1710 if (!adapter->vf_data) {
1711 adapter->vfs_allocated_count = 0;
1712 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1713 "Data Storage\n");
1714 }
1715 }
1716
1717 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1718 kfree(adapter->vf_data);
1719 adapter->vf_data = NULL;
1720#endif /* CONFIG_PCI_IOV */
1721 adapter->vfs_allocated_count = 0;
1722#ifdef CONFIG_PCI_IOV
1723 } else {
1724 unsigned char mac_addr[ETH_ALEN];
1725 int i;
1726 dev_info(&pdev->dev, "%d vfs allocated\n",
1727 adapter->vfs_allocated_count);
1728 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1729 random_ether_addr(mac_addr);
1730 igb_set_vf_mac(adapter, i, mac_addr);
1731 }
1732 }
1733#endif /* CONFIG_PCI_IOV */
1734}
1735
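For illustration, the per-VF MAC assignment above relies on random_ether_addr() supplying a random address with the multicast bit cleared and the locally administered bit set. The user-space sketch below mimics that bit handling; it is not driver code and uses rand() purely for demonstration.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void random_mac(unsigned char mac[6])
{
        int i;

        for (i = 0; i < 6; i++)
                mac[i] = rand() & 0xff;
        mac[0] &= 0xfe;         /* clear the multicast bit */
        mac[0] |= 0x02;         /* set the locally administered bit */
}

int main(void)
{
        unsigned char mac[6];

        srand((unsigned)time(NULL));
        random_mac(mac);
        printf("VF MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}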
1736
1737/**
1738 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1739 * @adapter: board private structure to initialize
1740 *
1741 * igb_init_hw_timer initializes the function pointer and values for the hw
1742 * timer found in hardware.
1743 **/
1744static void igb_init_hw_timer(struct igb_adapter *adapter)
1745{
1746 struct e1000_hw *hw = &adapter->hw;
1747
1748 switch (hw->mac.type) {
1749 case e1000_82576:
1750 /*
1751 * Initialize hardware timer: we keep it running just in case
1752 * that some program needs it later on.
1753 */
1754 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1755 adapter->cycles.read = igb_read_clock;
1756 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1757 adapter->cycles.mult = 1;
1758 /**
1759 * Scale the NIC clock cycle by a large factor so that
1760 * relatively small clock corrections can be added or
1761 * subtracted at each clock tick. The drawbacks of a large
1762 * factor are a) that the clock register overflows more quickly
1763 * (not such a big deal) and b) that the increment per tick has
1764 * to fit into 24 bits. As a result we need to use a shift of
1765 * 19 so we can fit a value of 16 into the TIMINCA register.
1766 */
1767 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1768 wr32(E1000_TIMINCA,
1769 (1 << E1000_TIMINCA_16NS_SHIFT) |
1770 (16 << IGB_82576_TSYNC_SHIFT));
1771
1772 /* Set registers so that rollover occurs soon to test this. */
1773 wr32(E1000_SYSTIML, 0x00000000);
1774 wr32(E1000_SYSTIMH, 0xFF800000);
1775 wrfl();
1776
1777 timecounter_init(&adapter->clock,
1778 &adapter->cycles,
1779 ktime_to_ns(ktime_get_real()));
1780 /*
1781 * Synchronize our NIC clock against system wall clock. NIC
1782 * time stamp reading requires ~3us per sample, each sample
1783 * was pretty stable even under load => only require 10
1784 * samples for each offset comparison.
1785 */
1786 memset(&adapter->compare, 0, sizeof(adapter->compare));
1787 adapter->compare.source = &adapter->clock;
1788 adapter->compare.target = ktime_get_real;
1789 adapter->compare.num_samples = 10;
1790 timecompare_update(&adapter->compare, 0);
1791 break;
1792 case e1000_82575:
1793 /* 82575 does not support timesync */
1794 default:
1795 break;
1796 }
1797
1798}
1799
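The shift-of-19 reasoning in igb_init_hw_timer() can be sanity-checked in isolation. The sketch below assumes the 16 ns period flag sits at bit 24 (E1000_TIMINCA_16NS_SHIFT) and that IGB_82576_TSYNC_SHIFT is 19, as the comment implies; it only demonstrates that the scaled increment still fits the 24-bit TIMINCA field.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TIMINCA_16NS_SHIFT  24      /* assumed position of the 16 ns period flag */
#define TSYNC_SHIFT         19      /* assumed value of IGB_82576_TSYNC_SHIFT */

int main(void)
{
        uint32_t incvalue = 16u << TSYNC_SHIFT;               /* 0x00800000 */
        uint32_t timinca  = (1u << TIMINCA_16NS_SHIFT) | incvalue;

        assert(incvalue < (1u << 24));                        /* must fit 24 bits */
        printf("TIMINCA = 0x%08x (increment 0x%06x)\n", timinca, incvalue);
        return 0;
}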
1800/**
1685 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1801 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1686 * @adapter: board private structure to initialize 1802 * @adapter: board private structure to initialize
1687 * 1803 *
@@ -1699,20 +1815,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1699 1815
1700 adapter->tx_ring_count = IGB_DEFAULT_TXD; 1816 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1701 adapter->rx_ring_count = IGB_DEFAULT_RXD; 1817 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1702 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1818 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1703 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1819 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1820
1704 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1821 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1705 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1822 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1706 1823
1707 /* This call may decrease the number of queues depending on 1824#ifdef CONFIG_PCI_IOV
1708 * interrupt mode. */ 1825 if (hw->mac.type == e1000_82576)
1709 igb_set_interrupt_capability(adapter); 1826 adapter->vfs_allocated_count = max_vfs;
1827
1828#endif /* CONFIG_PCI_IOV */
1829 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1830
1831 /*
1832 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1833 * then we should combine the queues into a queue pair in order to
1834 * conserve interrupts due to limited supply
1835 */
1836 if ((adapter->rss_queues > 4) ||
1837 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1838 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1710 1839
1711 if (igb_alloc_queues(adapter)) { 1840 /* This call may decrease the number of queues */
1841 if (igb_init_interrupt_scheme(adapter)) {
1712 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1842 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1713 return -ENOMEM; 1843 return -ENOMEM;
1714 } 1844 }
1715 1845
1846 igb_init_hw_timer(adapter);
1847 igb_probe_vfs(adapter);
1848
1716 /* Explicitly disable IRQ since the NIC can be in any state. */ 1849 /* Explicitly disable IRQ since the NIC can be in any state. */
1717 igb_irq_disable(adapter); 1850 igb_irq_disable(adapter);
1718 1851
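The queue-pairing heuristic added to igb_sw_init() trades per-queue vectors for a shared Tx/Rx vector when MSI-X interrupts would otherwise run short. A minimal sketch of the same test follows; the function name is illustrative, not a driver symbol.

#include <stdbool.h>
#include <stdio.h>

static bool needs_queue_pairs(unsigned rss_queues, unsigned vfs)
{
        /* mirror of the condition in the hunk above */
        return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
}

int main(void)
{
        printf("8 RSS queues, 0 VFs -> pairs: %d\n", needs_queue_pairs(8, 0));
        printf("2 RSS queues, 7 VFs -> pairs: %d\n", needs_queue_pairs(2, 7));
        printf("4 RSS queues, 0 VFs -> pairs: %d\n", needs_queue_pairs(4, 0));
        return 0;
}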
@@ -1757,21 +1890,12 @@ static int igb_open(struct net_device *netdev)
1757 1890
1758 /* e1000_power_up_phy(adapter); */ 1891 /* e1000_power_up_phy(adapter); */
1759 1892
1760 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1761 if ((adapter->hw.mng_cookie.status &
1762 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1763 igb_update_mng_vlan(adapter);
1764
1765 /* before we allocate an interrupt, we must be ready to handle it. 1893 /* before we allocate an interrupt, we must be ready to handle it.
1766 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1894 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1767 * as soon as we call pci_request_irq, so we have to setup our 1895 * as soon as we call pci_request_irq, so we have to setup our
1768 * clean_rx handler before we do so. */ 1896 * clean_rx handler before we do so. */
1769 igb_configure(adapter); 1897 igb_configure(adapter);
1770 1898
1771 igb_vmm_control(adapter);
1772 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1773 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1774
1775 err = igb_request_irq(adapter); 1899 err = igb_request_irq(adapter);
1776 if (err) 1900 if (err)
1777 goto err_req_irq; 1901 goto err_req_irq;
@@ -1779,18 +1903,28 @@ static int igb_open(struct net_device *netdev)
1779 /* From here on the code is the same as igb_up() */ 1903 /* From here on the code is the same as igb_up() */
1780 clear_bit(__IGB_DOWN, &adapter->state); 1904 clear_bit(__IGB_DOWN, &adapter->state);
1781 1905
1782 for (i = 0; i < adapter->num_rx_queues; i++) 1906 for (i = 0; i < adapter->num_q_vectors; i++) {
1783 napi_enable(&adapter->rx_ring[i].napi); 1907 struct igb_q_vector *q_vector = adapter->q_vector[i];
1908 napi_enable(&q_vector->napi);
1909 }
1784 1910
1785 /* Clear any pending interrupts. */ 1911 /* Clear any pending interrupts. */
1786 rd32(E1000_ICR); 1912 rd32(E1000_ICR);
1787 1913
1788 igb_irq_enable(adapter); 1914 igb_irq_enable(adapter);
1789 1915
1916 /* notify VFs that reset has been completed */
1917 if (adapter->vfs_allocated_count) {
1918 u32 reg_data = rd32(E1000_CTRL_EXT);
1919 reg_data |= E1000_CTRL_EXT_PFRSTD;
1920 wr32(E1000_CTRL_EXT, reg_data);
1921 }
1922
1790 netif_tx_start_all_queues(netdev); 1923 netif_tx_start_all_queues(netdev);
1791 1924
1792 /* Fire a link status change interrupt to start the watchdog. */ 1925 /* start the watchdog. */
1793 wr32(E1000_ICS, E1000_ICS_LSC); 1926 hw->mac.get_link_status = 1;
1927 schedule_work(&adapter->watchdog_task);
1794 1928
1795 return 0; 1929 return 0;
1796 1930
@@ -1829,28 +1963,18 @@ static int igb_close(struct net_device *netdev)
1829 igb_free_all_tx_resources(adapter); 1963 igb_free_all_tx_resources(adapter);
1830 igb_free_all_rx_resources(adapter); 1964 igb_free_all_rx_resources(adapter);
1831 1965
1832 /* kill manageability vlan ID if supported, but not if a vlan with
1833 * the same ID is registered on the host OS (let 8021q kill it) */
1834 if ((adapter->hw.mng_cookie.status &
1835 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1836 !(adapter->vlgrp &&
1837 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1838 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1839
1840 return 0; 1966 return 0;
1841} 1967}
1842 1968
1843/** 1969/**
1844 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 1970 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1845 * @adapter: board private structure
1846 * @tx_ring: tx descriptor ring (for a specific queue) to setup 1971 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1847 * 1972 *
1848 * Return 0 on success, negative on failure 1973 * Return 0 on success, negative on failure
1849 **/ 1974 **/
1850int igb_setup_tx_resources(struct igb_adapter *adapter, 1975int igb_setup_tx_resources(struct igb_ring *tx_ring)
1851 struct igb_ring *tx_ring)
1852{ 1976{
1853 struct pci_dev *pdev = adapter->pdev; 1977 struct pci_dev *pdev = tx_ring->pdev;
1854 int size; 1978 int size;
1855 1979
1856 size = sizeof(struct igb_buffer) * tx_ring->count; 1980 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1863,20 +1987,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1863 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1987 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1864 tx_ring->size = ALIGN(tx_ring->size, 4096); 1988 tx_ring->size = ALIGN(tx_ring->size, 4096);
1865 1989
1866 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1990 tx_ring->desc = pci_alloc_consistent(pdev,
1991 tx_ring->size,
1867 &tx_ring->dma); 1992 &tx_ring->dma);
1868 1993
1869 if (!tx_ring->desc) 1994 if (!tx_ring->desc)
1870 goto err; 1995 goto err;
1871 1996
1872 tx_ring->adapter = adapter;
1873 tx_ring->next_to_use = 0; 1997 tx_ring->next_to_use = 0;
1874 tx_ring->next_to_clean = 0; 1998 tx_ring->next_to_clean = 0;
1875 return 0; 1999 return 0;
1876 2000
1877err: 2001err:
1878 vfree(tx_ring->buffer_info); 2002 vfree(tx_ring->buffer_info);
1879 dev_err(&adapter->pdev->dev, 2003 dev_err(&pdev->dev,
1880 "Unable to allocate memory for the transmit descriptor ring\n"); 2004 "Unable to allocate memory for the transmit descriptor ring\n");
1881 return -ENOMEM; 2005 return -ENOMEM;
1882} 2006}
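igb_setup_tx_resources() rounds the descriptor ring up to a 4 KB multiple before allocating it. A standalone sketch of that arithmetic, assuming 16-byte advanced Tx descriptors and a default ring of 256 entries:

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t desc_size = 16;  /* sizeof(union e1000_adv_tx_desc), assumed */
        size_t small = ALIGN_UP(80 * desc_size, 4096);   /* 80-entry ring  */
        size_t deflt = ALIGN_UP(256 * desc_size, 4096);  /* assumed default */

        printf("80 descriptors  -> %zu bytes\n", small); /* 1280 rounds up to 4096 */
        printf("256 descriptors -> %zu bytes\n", deflt); /* already a 4 KB multiple */
        return 0;
}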
@@ -1890,13 +2014,13 @@ err:
1890 **/ 2014 **/
1891static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2015static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1892{ 2016{
2017 struct pci_dev *pdev = adapter->pdev;
1893 int i, err = 0; 2018 int i, err = 0;
1894 int r_idx;
1895 2019
1896 for (i = 0; i < adapter->num_tx_queues; i++) { 2020 for (i = 0; i < adapter->num_tx_queues; i++) {
1897 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2021 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1898 if (err) { 2022 if (err) {
1899 dev_err(&adapter->pdev->dev, 2023 dev_err(&pdev->dev,
1900 "Allocation for Tx Queue %u failed\n", i); 2024 "Allocation for Tx Queue %u failed\n", i);
1901 for (i--; i >= 0; i--) 2025 for (i--; i >= 0; i--)
1902 igb_free_tx_resources(&adapter->tx_ring[i]); 2026 igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1904,57 +2028,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1904 } 2028 }
1905 } 2029 }
1906 2030
1907 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2031 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
1908 r_idx = i % adapter->num_tx_queues; 2032 int r_idx = i % adapter->num_tx_queues;
1909 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2033 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1910 } 2034 }
1911 return err; 2035 return err;
1912} 2036}
1913 2037
1914/** 2038/**
1915 * igb_configure_tx - Configure transmit Unit after Reset 2039 * igb_setup_tctl - configure the transmit control registers
1916 * @adapter: board private structure 2040 * @adapter: Board private structure
1917 *
1918 * Configure the Tx unit of the MAC after a reset.
1919 **/ 2041 **/
1920static void igb_configure_tx(struct igb_adapter *adapter) 2042void igb_setup_tctl(struct igb_adapter *adapter)
1921{ 2043{
1922 u64 tdba;
1923 struct e1000_hw *hw = &adapter->hw; 2044 struct e1000_hw *hw = &adapter->hw;
1924 u32 tctl; 2045 u32 tctl;
1925 u32 txdctl, txctrl;
1926 int i, j;
1927
1928 for (i = 0; i < adapter->num_tx_queues; i++) {
1929 struct igb_ring *ring = &adapter->tx_ring[i];
1930 j = ring->reg_idx;
1931 wr32(E1000_TDLEN(j),
1932 ring->count * sizeof(union e1000_adv_tx_desc));
1933 tdba = ring->dma;
1934 wr32(E1000_TDBAL(j),
1935 tdba & 0x00000000ffffffffULL);
1936 wr32(E1000_TDBAH(j), tdba >> 32);
1937
1938 ring->head = E1000_TDH(j);
1939 ring->tail = E1000_TDT(j);
1940 writel(0, hw->hw_addr + ring->tail);
1941 writel(0, hw->hw_addr + ring->head);
1942 txdctl = rd32(E1000_TXDCTL(j));
1943 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1944 wr32(E1000_TXDCTL(j), txdctl);
1945
1946 /* Turn off Relaxed Ordering on head write-backs. The
1947 * writebacks MUST be delivered in order or it will
1948 * completely screw up our bookeeping.
1949 */
1950 txctrl = rd32(E1000_DCA_TXCTRL(j));
1951 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1952 wr32(E1000_DCA_TXCTRL(j), txctrl);
1953 }
1954 2046
1955 /* disable queue 0 to prevent tail bump w/o re-configuration */ 2047 /* disable queue 0 which is enabled by default on 82575 and 82576 */
1956 if (adapter->vfs_allocated_count) 2048 wr32(E1000_TXDCTL(0), 0);
1957 wr32(E1000_TXDCTL(0), 0);
1958 2049
1959 /* Program the Transmit Control Register */ 2050 /* Program the Transmit Control Register */
1960 tctl = rd32(E1000_TCTL); 2051 tctl = rd32(E1000_TCTL);
@@ -1964,9 +2055,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1964 2055
1965 igb_config_collision_dist(hw); 2056 igb_config_collision_dist(hw);
1966 2057
1967 /* Setup Transmit Descriptor Settings for eop descriptor */
1968 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1969
1970 /* Enable transmits */ 2058 /* Enable transmits */
1971 tctl |= E1000_TCTL_EN; 2059 tctl |= E1000_TCTL_EN;
1972 2060
@@ -1974,16 +2062,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1974} 2062}
1975 2063
1976/** 2064/**
1977 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 2065 * igb_configure_tx_ring - Configure transmit ring after Reset
1978 * @adapter: board private structure 2066 * @adapter: board private structure
2067 * @ring: tx ring to configure
2068 *
2069 * Configure a transmit ring after a reset.
2070 **/
2071void igb_configure_tx_ring(struct igb_adapter *adapter,
2072 struct igb_ring *ring)
2073{
2074 struct e1000_hw *hw = &adapter->hw;
2075 u32 txdctl;
2076 u64 tdba = ring->dma;
2077 int reg_idx = ring->reg_idx;
2078
2079 /* disable the queue */
2080 txdctl = rd32(E1000_TXDCTL(reg_idx));
2081 wr32(E1000_TXDCTL(reg_idx),
2082 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2083 wrfl();
2084 mdelay(10);
2085
2086 wr32(E1000_TDLEN(reg_idx),
2087 ring->count * sizeof(union e1000_adv_tx_desc));
2088 wr32(E1000_TDBAL(reg_idx),
2089 tdba & 0x00000000ffffffffULL);
2090 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2091
2092 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2093 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2094 writel(0, ring->head);
2095 writel(0, ring->tail);
2096
2097 txdctl |= IGB_TX_PTHRESH;
2098 txdctl |= IGB_TX_HTHRESH << 8;
2099 txdctl |= IGB_TX_WTHRESH << 16;
2100
2101 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2102 wr32(E1000_TXDCTL(reg_idx), txdctl);
2103}
2104
2105/**
2106 * igb_configure_tx - Configure transmit Unit after Reset
2107 * @adapter: board private structure
2108 *
2109 * Configure the Tx unit of the MAC after a reset.
2110 **/
2111static void igb_configure_tx(struct igb_adapter *adapter)
2112{
2113 int i;
2114
2115 for (i = 0; i < adapter->num_tx_queues; i++)
2116 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2117}
2118
2119/**
2120 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1979 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2121 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1980 * 2122 *
1981 * Returns 0 on success, negative on failure 2123 * Returns 0 on success, negative on failure
1982 **/ 2124 **/
1983int igb_setup_rx_resources(struct igb_adapter *adapter, 2125int igb_setup_rx_resources(struct igb_ring *rx_ring)
1984 struct igb_ring *rx_ring)
1985{ 2126{
1986 struct pci_dev *pdev = adapter->pdev; 2127 struct pci_dev *pdev = rx_ring->pdev;
1987 int size, desc_len; 2128 int size, desc_len;
1988 2129
1989 size = sizeof(struct igb_buffer) * rx_ring->count; 2130 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2007,13 +2148,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
2007 rx_ring->next_to_clean = 0; 2148 rx_ring->next_to_clean = 0;
2008 rx_ring->next_to_use = 0; 2149 rx_ring->next_to_use = 0;
2009 2150
2010 rx_ring->adapter = adapter;
2011
2012 return 0; 2151 return 0;
2013 2152
2014err: 2153err:
2015 vfree(rx_ring->buffer_info); 2154 vfree(rx_ring->buffer_info);
2016 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 2155 rx_ring->buffer_info = NULL;
2156 dev_err(&pdev->dev, "Unable to allocate memory for "
2017 "the receive descriptor ring\n"); 2157 "the receive descriptor ring\n");
2018 return -ENOMEM; 2158 return -ENOMEM;
2019} 2159}
@@ -2027,12 +2167,13 @@ err:
2027 **/ 2167 **/
2028static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 2168static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2029{ 2169{
2170 struct pci_dev *pdev = adapter->pdev;
2030 int i, err = 0; 2171 int i, err = 0;
2031 2172
2032 for (i = 0; i < adapter->num_rx_queues; i++) { 2173 for (i = 0; i < adapter->num_rx_queues; i++) {
2033 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2174 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2034 if (err) { 2175 if (err) {
2035 dev_err(&adapter->pdev->dev, 2176 dev_err(&pdev->dev,
2036 "Allocation for Rx Queue %u failed\n", i); 2177 "Allocation for Rx Queue %u failed\n", i);
2037 for (i--; i >= 0; i--) 2178 for (i--; i >= 0; i--)
2038 igb_free_rx_resources(&adapter->rx_ring[i]); 2179 igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2044,15 +2185,118 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2044} 2185}
2045 2186
2046/** 2187/**
2188 * igb_setup_mrqc - configure the multiple receive queue control registers
2189 * @adapter: Board private structure
2190 **/
2191static void igb_setup_mrqc(struct igb_adapter *adapter)
2192{
2193 struct e1000_hw *hw = &adapter->hw;
2194 u32 mrqc, rxcsum;
2195 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2196 union e1000_reta {
2197 u32 dword;
2198 u8 bytes[4];
2199 } reta;
2200 static const u8 rsshash[40] = {
2201 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2202 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2203 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2204 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2205
2206 /* Fill out hash function seeds */
2207 for (j = 0; j < 10; j++) {
2208 u32 rsskey = rsshash[(j * 4)];
2209 rsskey |= rsshash[(j * 4) + 1] << 8;
2210 rsskey |= rsshash[(j * 4) + 2] << 16;
2211 rsskey |= rsshash[(j * 4) + 3] << 24;
2212 array_wr32(E1000_RSSRK(0), j, rsskey);
2213 }
2214
2215 num_rx_queues = adapter->rss_queues;
2216
2217 if (adapter->vfs_allocated_count) {
2218 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2219 switch (hw->mac.type) {
2220 case e1000_82576:
2221 shift = 3;
2222 num_rx_queues = 2;
2223 break;
2224 case e1000_82575:
2225 shift = 2;
2226 shift2 = 6;
2227 default:
2228 break;
2229 }
2230 } else {
2231 if (hw->mac.type == e1000_82575)
2232 shift = 6;
2233 }
2234
2235 for (j = 0; j < (32 * 4); j++) {
2236 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2237 if (shift2)
2238 reta.bytes[j & 3] |= num_rx_queues << shift2;
2239 if ((j & 3) == 3)
2240 wr32(E1000_RETA(j >> 2), reta.dword);
2241 }
2242
2243 /*
2244 * Disable raw packet checksumming so that RSS hash is placed in
2245 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2246 * offloads as they are enabled by default
2247 */
2248 rxcsum = rd32(E1000_RXCSUM);
2249 rxcsum |= E1000_RXCSUM_PCSD;
2250
2251 if (adapter->hw.mac.type >= e1000_82576)
2252 /* Enable Receive Checksum Offload for SCTP */
2253 rxcsum |= E1000_RXCSUM_CRCOFL;
2254
2255 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2256 wr32(E1000_RXCSUM, rxcsum);
2257
2258 /* If VMDq is enabled then we set the appropriate mode for that, else
2259 * we default to RSS so that an RSS hash is calculated per packet even
2260 * if we are only using one queue */
2261 if (adapter->vfs_allocated_count) {
2262 if (hw->mac.type > e1000_82575) {
2263 /* Set the default pool for the PF's first queue */
2264 u32 vtctl = rd32(E1000_VT_CTL);
2265 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2266 E1000_VT_CTL_DISABLE_DEF_POOL);
2267 vtctl |= adapter->vfs_allocated_count <<
2268 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2269 wr32(E1000_VT_CTL, vtctl);
2270 }
2271 if (adapter->rss_queues > 1)
2272 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2273 else
2274 mrqc = E1000_MRQC_ENABLE_VMDQ;
2275 } else {
2276 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2277 }
2278 igb_vmm_control(adapter);
2279
2280 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2281 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2282 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2283 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2284 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2285 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2286 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2287 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2288
2289 wr32(E1000_MRQC, mrqc);
2290}
2291
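igb_setup_mrqc() writes the 40-byte RSS key as ten little-endian 32-bit RSSRK words and fills the 128-entry redirection table four bytes per RETA register. The standalone sketch below reproduces that packing for the no-VF case with four RSS queues and a shift of 0; it is an illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

static const uint8_t rsshash[40] = {
        0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
        0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
        0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
        0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

int main(void)
{
        unsigned num_rx_queues = 4, shift = 0;
        uint32_t reta = 0;
        unsigned j;

        /* pack the 40-byte key into ten little-endian 32-bit RSSRK words */
        for (j = 0; j < 10; j++) {
                uint32_t rsskey = rsshash[j * 4] |
                                  (rsshash[j * 4 + 1] << 8) |
                                  (rsshash[j * 4 + 2] << 16) |
                                  ((uint32_t)rsshash[j * 4 + 3] << 24);
                printf("RSSRK[%u] = 0x%08x\n", j, rsskey);
        }

        /* build the 128-entry redirection table four bytes per register */
        for (j = 0; j < 128; j++) {
                reta |= (uint32_t)((j % num_rx_queues) << shift) << (8 * (j & 3));
                if ((j & 3) == 3) {
                        if (j < 16)     /* print only the first few dwords */
                                printf("RETA[%u] = 0x%08x\n", j >> 2, reta);
                        reta = 0;
                }
        }
        return 0;
}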
2292/**
2047 * igb_setup_rctl - configure the receive control registers 2293 * igb_setup_rctl - configure the receive control registers
2048 * @adapter: Board private structure 2294 * @adapter: Board private structure
2049 **/ 2295 **/
2050static void igb_setup_rctl(struct igb_adapter *adapter) 2296void igb_setup_rctl(struct igb_adapter *adapter)
2051{ 2297{
2052 struct e1000_hw *hw = &adapter->hw; 2298 struct e1000_hw *hw = &adapter->hw;
2053 u32 rctl; 2299 u32 rctl;
2054 u32 srrctl = 0;
2055 int i;
2056 2300
2057 rctl = rd32(E1000_RCTL); 2301 rctl = rd32(E1000_RCTL);
2058 2302
@@ -2069,75 +2313,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
2069 */ 2313 */
2070 rctl |= E1000_RCTL_SECRC; 2314 rctl |= E1000_RCTL_SECRC;
2071 2315
2072 /* 2316 /* disable store bad packets and clear size bits. */
2073 * disable store bad packets and clear size bits.
2074 */
2075 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 2317 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2076 2318
2077 /* enable LPE when to prevent packets larger than max_frame_size */ 2319 /* enable LPE to prevent packets larger than max_frame_size */
2078 rctl |= E1000_RCTL_LPE; 2320 rctl |= E1000_RCTL_LPE;
2079 2321
2080 /* Setup buffer sizes */ 2322 /* disable queue 0 to prevent tail write w/o re-config */
2081 switch (adapter->rx_buffer_len) { 2323 wr32(E1000_RXDCTL(0), 0);
2082 case IGB_RXBUFFER_256:
2083 rctl |= E1000_RCTL_SZ_256;
2084 break;
2085 case IGB_RXBUFFER_512:
2086 rctl |= E1000_RCTL_SZ_512;
2087 break;
2088 default:
2089 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2090 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2091 break;
2092 }
2093
2094 /* 82575 and greater support packet-split where the protocol
2095 * header is placed in skb->data and the packet data is
2096 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2097 * In the case of a non-split, skb->data is linearly filled,
2098 * followed by the page buffers. Therefore, skb->data is
2099 * sized to hold the largest protocol header.
2100 */
2101 /* allocations using alloc_page take too long for regular MTU
2102 * so only enable packet split for jumbo frames */
2103 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2104 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2105 srrctl |= adapter->rx_ps_hdr_size <<
2106 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2107 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2108 } else {
2109 adapter->rx_ps_hdr_size = 0;
2110 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 }
2112 2324
2113 /* Attention!!! For SR-IOV PF driver operations you must enable 2325 /* Attention!!! For SR-IOV PF driver operations you must enable
2114 * queue drop for all VF and PF queues to prevent head of line blocking 2326 * queue drop for all VF and PF queues to prevent head of line blocking
2115 * if an un-trusted VF does not provide descriptors to hardware. 2327 * if an un-trusted VF does not provide descriptors to hardware.
2116 */ 2328 */
2117 if (adapter->vfs_allocated_count) { 2329 if (adapter->vfs_allocated_count) {
2118 u32 vmolr;
2119
2120 /* set all queue drop enable bits */ 2330 /* set all queue drop enable bits */
2121 wr32(E1000_QDE, ALL_QUEUES); 2331 wr32(E1000_QDE, ALL_QUEUES);
2122 srrctl |= E1000_SRRCTL_DROP_EN; 2332 }
2123 2333
2124 /* disable queue 0 to prevent tail write w/o re-config */ 2334 wr32(E1000_RCTL, rctl);
2125 wr32(E1000_RXDCTL(0), 0); 2335}
2126 2336
2127 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2337static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2128 if (rctl & E1000_RCTL_LPE) 2338 int vfn)
2129 vmolr |= E1000_VMOLR_LPE; 2339{
2130 if (adapter->num_rx_queues > 1) 2340 struct e1000_hw *hw = &adapter->hw;
2131 vmolr |= E1000_VMOLR_RSSE; 2341 u32 vmolr;
2132 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2133 }
2134 2342
2135 for (i = 0; i < adapter->num_rx_queues; i++) { 2343 /* if it isn't the PF check to see if VFs are enabled and
2136 int j = adapter->rx_ring[i].reg_idx; 2344 * increase the size to support vlan tags */
2137 wr32(E1000_SRRCTL(j), srrctl); 2345 if (vfn < adapter->vfs_allocated_count &&
2138 } 2346 adapter->vf_data[vfn].vlans_enabled)
2347 size += VLAN_TAG_SIZE;
2139 2348
2140 wr32(E1000_RCTL, rctl); 2349 vmolr = rd32(E1000_VMOLR(vfn));
2350 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2351 vmolr |= size | E1000_VMOLR_LPE;
2352 wr32(E1000_VMOLR(vfn), vmolr);
2353
2354 return 0;
2141} 2355}
2142 2356
2143/** 2357/**
@@ -2159,33 +2373,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2159 * size and set the VMOLR RLPML to the size we need */ 2373 * size and set the VMOLR RLPML to the size we need */
2160 if (pf_id) { 2374 if (pf_id) {
2161 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 2375 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2162 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; 2376 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2163 } 2377 }
2164 2378
2165 wr32(E1000_RLPML, max_frame_size); 2379 wr32(E1000_RLPML, max_frame_size);
2166} 2380}
2167 2381
2382static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2383{
2384 struct e1000_hw *hw = &adapter->hw;
2385 u32 vmolr;
2386
2387 /*
2388 * This register exists only on 82576 and newer so if we are older then
2389 * we should exit and do nothing
2390 */
2391 if (hw->mac.type < e1000_82576)
2392 return;
2393
2394 vmolr = rd32(E1000_VMOLR(vfn));
2395 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2396 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2397
2398 /* clear all bits that might not be set */
2399 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2400
2401 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2402 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2403 /*
2404 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2405 * multicast packets
2406 */
2407 if (vfn <= adapter->vfs_allocated_count)
2408 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2409
2410 wr32(E1000_VMOLR(vfn), vmolr);
2411}
2412
2168/** 2413/**
2169 * igb_configure_vt_default_pool - Configure VT default pool 2414 * igb_configure_rx_ring - Configure a receive ring after Reset
2170 * @adapter: board private structure 2415 * @adapter: board private structure
2416 * @ring: receive ring to be configured
2171 * 2417 *
2172 * Configure the default pool 2418 * Configure the Rx unit of the MAC after a reset.
2173 **/ 2419 **/
2174static void igb_configure_vt_default_pool(struct igb_adapter *adapter) 2420void igb_configure_rx_ring(struct igb_adapter *adapter,
2421 struct igb_ring *ring)
2175{ 2422{
2176 struct e1000_hw *hw = &adapter->hw; 2423 struct e1000_hw *hw = &adapter->hw;
2177 u16 pf_id = adapter->vfs_allocated_count; 2424 u64 rdba = ring->dma;
2178 u32 vtctl; 2425 int reg_idx = ring->reg_idx;
2426 u32 srrctl, rxdctl;
2427
2428 /* disable the queue */
2429 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2430 wr32(E1000_RXDCTL(reg_idx),
2431 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2432
2433 /* Set DMA base address registers */
2434 wr32(E1000_RDBAL(reg_idx),
2435 rdba & 0x00000000ffffffffULL);
2436 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2437 wr32(E1000_RDLEN(reg_idx),
2438 ring->count * sizeof(union e1000_adv_rx_desc));
2439
2440 /* initialize head and tail */
2441 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2442 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2443 writel(0, ring->head);
2444 writel(0, ring->tail);
2445
2446 /* set descriptor configuration */
2447 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2448 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2449 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2450#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2451 srrctl |= IGB_RXBUFFER_16384 >>
2452 E1000_SRRCTL_BSIZEPKT_SHIFT;
2453#else
2454 srrctl |= (PAGE_SIZE / 2) >>
2455 E1000_SRRCTL_BSIZEPKT_SHIFT;
2456#endif
2457 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2458 } else {
2459 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2460 E1000_SRRCTL_BSIZEPKT_SHIFT;
2461 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2462 }
2179 2463
2180 /* not in sr-iov mode - do nothing */ 2464 wr32(E1000_SRRCTL(reg_idx), srrctl);
2181 if (!pf_id) 2465
2182 return; 2466 /* set filtering for VMDQ pools */
2467 igb_set_vmolr(adapter, reg_idx & 0x7);
2183 2468
2184 vtctl = rd32(E1000_VT_CTL); 2469 /* enable receive descriptor fetching */
2185 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 2470 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2186 E1000_VT_CTL_DISABLE_DEF_POOL); 2471 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2187 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2472 rxdctl &= 0xFFF00000;
2188 wr32(E1000_VT_CTL, vtctl); 2473 rxdctl |= IGB_RX_PTHRESH;
2474 rxdctl |= IGB_RX_HTHRESH << 8;
2475 rxdctl |= IGB_RX_WTHRESH << 16;
2476 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2189} 2477}
2190 2478
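igb_configure_rx_ring() chooses between header-split and single-buffer SRRCTL layouts based on rx_buffer_len. A sketch of the single-buffer case, assuming the packet-size field counts 1 KB units (a shift of 10) and using a placeholder for the ONEBUF descriptor-type constant:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint32_t)(a) - 1))
#define BSIZEPKT_SHIFT   10           /* assumed: packet buffer in 1 KB units */
#define DESCTYPE_ONEBUF  0x02000000u  /* placeholder for the ONEBUF descriptor type */

int main(void)
{
        uint32_t rx_buffer_len = 2048;   /* e.g. a 2 KB receive buffer */
        uint32_t srrctl = (ALIGN_UP(rx_buffer_len, 1024) >> BSIZEPKT_SHIFT) |
                          DESCTYPE_ONEBUF;

        printf("SRRCTL = 0x%08x (packet buffer %u KB)\n",
               srrctl, ALIGN_UP(rx_buffer_len, 1024) >> BSIZEPKT_SHIFT);
        return 0;
}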
2191/** 2479/**
@@ -2196,112 +2484,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2196 **/ 2484 **/
2197static void igb_configure_rx(struct igb_adapter *adapter) 2485static void igb_configure_rx(struct igb_adapter *adapter)
2198{ 2486{
2199 u64 rdba;
2200 struct e1000_hw *hw = &adapter->hw;
2201 u32 rctl, rxcsum;
2202 u32 rxdctl;
2203 int i; 2487 int i;
2204 2488
2205 /* disable receives while setting up the descriptors */ 2489 /* set UTA to appropriate mode */
2206 rctl = rd32(E1000_RCTL); 2490 igb_set_uta(adapter);
2207 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2208 wrfl();
2209 mdelay(10);
2210 2491
2211 if (adapter->itr_setting > 3) 2492 /* set the correct pool for the PF default MAC address in entry 0 */
2212 wr32(E1000_ITR, adapter->itr); 2493 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2494 adapter->vfs_allocated_count);
2213 2495
2214 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2496 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2215 * the Base and Length of the Rx Descriptor Ring */ 2497 * the Base and Length of the Rx Descriptor Ring */
2216 for (i = 0; i < adapter->num_rx_queues; i++) { 2498 for (i = 0; i < adapter->num_rx_queues; i++)
2217 struct igb_ring *ring = &adapter->rx_ring[i]; 2499 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2218 int j = ring->reg_idx;
2219 rdba = ring->dma;
2220 wr32(E1000_RDBAL(j),
2221 rdba & 0x00000000ffffffffULL);
2222 wr32(E1000_RDBAH(j), rdba >> 32);
2223 wr32(E1000_RDLEN(j),
2224 ring->count * sizeof(union e1000_adv_rx_desc));
2225
2226 ring->head = E1000_RDH(j);
2227 ring->tail = E1000_RDT(j);
2228 writel(0, hw->hw_addr + ring->tail);
2229 writel(0, hw->hw_addr + ring->head);
2230
2231 rxdctl = rd32(E1000_RXDCTL(j));
2232 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2233 rxdctl &= 0xFFF00000;
2234 rxdctl |= IGB_RX_PTHRESH;
2235 rxdctl |= IGB_RX_HTHRESH << 8;
2236 rxdctl |= IGB_RX_WTHRESH << 16;
2237 wr32(E1000_RXDCTL(j), rxdctl);
2238 }
2239
2240 if (adapter->num_rx_queues > 1) {
2241 u32 random[10];
2242 u32 mrqc;
2243 u32 j, shift;
2244 union e1000_reta {
2245 u32 dword;
2246 u8 bytes[4];
2247 } reta;
2248
2249 get_random_bytes(&random[0], 40);
2250
2251 if (hw->mac.type >= e1000_82576)
2252 shift = 0;
2253 else
2254 shift = 6;
2255 for (j = 0; j < (32 * 4); j++) {
2256 reta.bytes[j & 3] =
2257 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2258 if ((j & 3) == 3)
2259 writel(reta.dword,
2260 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2261 }
2262 if (adapter->vfs_allocated_count)
2263 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2264 else
2265 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2266
2267 /* Fill out hash function seeds */
2268 for (j = 0; j < 10; j++)
2269 array_wr32(E1000_RSSRK(0), j, random[j]);
2270
2271 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2272 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2273 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2274 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2275 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2276 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2277 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2278 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2279
2280 wr32(E1000_MRQC, mrqc);
2281 } else if (adapter->vfs_allocated_count) {
2282 /* Enable multi-queue for sr-iov */
2283 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2284 }
2285
2286 /* Enable Receive Checksum Offload for TCP and UDP */
2287 rxcsum = rd32(E1000_RXCSUM);
2288 /* Disable raw packet checksumming */
2289 rxcsum |= E1000_RXCSUM_PCSD;
2290
2291 if (adapter->hw.mac.type == e1000_82576)
2292 /* Enable Receive Checksum Offload for SCTP */
2293 rxcsum |= E1000_RXCSUM_CRCOFL;
2294
2295 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2296 wr32(E1000_RXCSUM, rxcsum);
2297
2298 /* Set the default pool for the PF's first queue */
2299 igb_configure_vt_default_pool(adapter);
2300
2301 igb_rlpml_set(adapter);
2302
2303 /* Enable Receives */
2304 wr32(E1000_RCTL, rctl);
2305} 2500}
2306 2501
2307/** 2502/**
@@ -2312,14 +2507,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2312 **/ 2507 **/
2313void igb_free_tx_resources(struct igb_ring *tx_ring) 2508void igb_free_tx_resources(struct igb_ring *tx_ring)
2314{ 2509{
2315 struct pci_dev *pdev = tx_ring->adapter->pdev;
2316
2317 igb_clean_tx_ring(tx_ring); 2510 igb_clean_tx_ring(tx_ring);
2318 2511
2319 vfree(tx_ring->buffer_info); 2512 vfree(tx_ring->buffer_info);
2320 tx_ring->buffer_info = NULL; 2513 tx_ring->buffer_info = NULL;
2321 2514
2322 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2515 /* if not set, then don't free */
2516 if (!tx_ring->desc)
2517 return;
2518
2519 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2520 tx_ring->desc, tx_ring->dma);
2323 2521
2324 tx_ring->desc = NULL; 2522 tx_ring->desc = NULL;
2325} 2523}
@@ -2338,12 +2536,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2338 igb_free_tx_resources(&adapter->tx_ring[i]); 2536 igb_free_tx_resources(&adapter->tx_ring[i]);
2339} 2537}
2340 2538
2341static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, 2539void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2342 struct igb_buffer *buffer_info) 2540 struct igb_buffer *buffer_info)
2343{ 2541{
2344 buffer_info->dma = 0; 2542 buffer_info->dma = 0;
2345 if (buffer_info->skb) { 2543 if (buffer_info->skb) {
2346 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 2544 skb_dma_unmap(&tx_ring->pdev->dev,
2545 buffer_info->skb,
2347 DMA_TO_DEVICE); 2546 DMA_TO_DEVICE);
2348 dev_kfree_skb_any(buffer_info->skb); 2547 dev_kfree_skb_any(buffer_info->skb);
2349 buffer_info->skb = NULL; 2548 buffer_info->skb = NULL;
@@ -2358,7 +2557,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2358 **/ 2557 **/
2359static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2558static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2360{ 2559{
2361 struct igb_adapter *adapter = tx_ring->adapter;
2362 struct igb_buffer *buffer_info; 2560 struct igb_buffer *buffer_info;
2363 unsigned long size; 2561 unsigned long size;
2364 unsigned int i; 2562 unsigned int i;
@@ -2369,21 +2567,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2369 2567
2370 for (i = 0; i < tx_ring->count; i++) { 2568 for (i = 0; i < tx_ring->count; i++) {
2371 buffer_info = &tx_ring->buffer_info[i]; 2569 buffer_info = &tx_ring->buffer_info[i];
2372 igb_unmap_and_free_tx_resource(adapter, buffer_info); 2570 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2373 } 2571 }
2374 2572
2375 size = sizeof(struct igb_buffer) * tx_ring->count; 2573 size = sizeof(struct igb_buffer) * tx_ring->count;
2376 memset(tx_ring->buffer_info, 0, size); 2574 memset(tx_ring->buffer_info, 0, size);
2377 2575
2378 /* Zero out the descriptor ring */ 2576 /* Zero out the descriptor ring */
2379
2380 memset(tx_ring->desc, 0, tx_ring->size); 2577 memset(tx_ring->desc, 0, tx_ring->size);
2381 2578
2382 tx_ring->next_to_use = 0; 2579 tx_ring->next_to_use = 0;
2383 tx_ring->next_to_clean = 0; 2580 tx_ring->next_to_clean = 0;
2384
2385 writel(0, adapter->hw.hw_addr + tx_ring->head);
2386 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2387} 2581}
2388 2582
2389/** 2583/**
@@ -2406,14 +2600,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2406 **/ 2600 **/
2407void igb_free_rx_resources(struct igb_ring *rx_ring) 2601void igb_free_rx_resources(struct igb_ring *rx_ring)
2408{ 2602{
2409 struct pci_dev *pdev = rx_ring->adapter->pdev;
2410
2411 igb_clean_rx_ring(rx_ring); 2603 igb_clean_rx_ring(rx_ring);
2412 2604
2413 vfree(rx_ring->buffer_info); 2605 vfree(rx_ring->buffer_info);
2414 rx_ring->buffer_info = NULL; 2606 rx_ring->buffer_info = NULL;
2415 2607
2416 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2608 /* if not set, then don't free */
2609 if (!rx_ring->desc)
2610 return;
2611
2612 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2613 rx_ring->desc, rx_ring->dma);
2417 2614
2418 rx_ring->desc = NULL; 2615 rx_ring->desc = NULL;
2419} 2616}
@@ -2438,26 +2635,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2438 **/ 2635 **/
2439static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2636static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2440{ 2637{
2441 struct igb_adapter *adapter = rx_ring->adapter;
2442 struct igb_buffer *buffer_info; 2638 struct igb_buffer *buffer_info;
2443 struct pci_dev *pdev = adapter->pdev;
2444 unsigned long size; 2639 unsigned long size;
2445 unsigned int i; 2640 unsigned int i;
2446 2641
2447 if (!rx_ring->buffer_info) 2642 if (!rx_ring->buffer_info)
2448 return; 2643 return;
2644
2449 /* Free all the Rx ring sk_buffs */ 2645 /* Free all the Rx ring sk_buffs */
2450 for (i = 0; i < rx_ring->count; i++) { 2646 for (i = 0; i < rx_ring->count; i++) {
2451 buffer_info = &rx_ring->buffer_info[i]; 2647 buffer_info = &rx_ring->buffer_info[i];
2452 if (buffer_info->dma) { 2648 if (buffer_info->dma) {
2453 if (adapter->rx_ps_hdr_size) 2649 pci_unmap_single(rx_ring->pdev,
2454 pci_unmap_single(pdev, buffer_info->dma, 2650 buffer_info->dma,
2455 adapter->rx_ps_hdr_size, 2651 rx_ring->rx_buffer_len,
2456 PCI_DMA_FROMDEVICE); 2652 PCI_DMA_FROMDEVICE);
2457 else
2458 pci_unmap_single(pdev, buffer_info->dma,
2459 adapter->rx_buffer_len,
2460 PCI_DMA_FROMDEVICE);
2461 buffer_info->dma = 0; 2653 buffer_info->dma = 0;
2462 } 2654 }
2463 2655
@@ -2465,14 +2657,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2465 dev_kfree_skb(buffer_info->skb); 2657 dev_kfree_skb(buffer_info->skb);
2466 buffer_info->skb = NULL; 2658 buffer_info->skb = NULL;
2467 } 2659 }
2660 if (buffer_info->page_dma) {
2661 pci_unmap_page(rx_ring->pdev,
2662 buffer_info->page_dma,
2663 PAGE_SIZE / 2,
2664 PCI_DMA_FROMDEVICE);
2665 buffer_info->page_dma = 0;
2666 }
2468 if (buffer_info->page) { 2667 if (buffer_info->page) {
2469 if (buffer_info->page_dma)
2470 pci_unmap_page(pdev, buffer_info->page_dma,
2471 PAGE_SIZE / 2,
2472 PCI_DMA_FROMDEVICE);
2473 put_page(buffer_info->page); 2668 put_page(buffer_info->page);
2474 buffer_info->page = NULL; 2669 buffer_info->page = NULL;
2475 buffer_info->page_dma = 0;
2476 buffer_info->page_offset = 0; 2670 buffer_info->page_offset = 0;
2477 } 2671 }
2478 } 2672 }
@@ -2485,9 +2679,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2485 2679
2486 rx_ring->next_to_clean = 0; 2680 rx_ring->next_to_clean = 0;
2487 rx_ring->next_to_use = 0; 2681 rx_ring->next_to_use = 0;
2488
2489 writel(0, adapter->hw.hw_addr + rx_ring->head);
2490 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2491} 2682}
2492 2683
2493/** 2684/**
@@ -2521,61 +2712,90 @@ static int igb_set_mac(struct net_device *netdev, void *p)
2521 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2712 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2522 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2713 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2523 2714
2524 igb_rar_set(hw, hw->mac.addr, 0); 2715 /* set the correct pool for the new PF MAC address in entry 0 */
2525 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 2716 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2717 adapter->vfs_allocated_count);
2526 2718
2527 return 0; 2719 return 0;
2528} 2720}
2529 2721
2530/** 2722/**
2531 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2723 * igb_write_mc_addr_list - write multicast addresses to MTA
2532 * @netdev: network interface device structure 2724 * @netdev: network interface device structure
2533 * 2725 *
2534 * The set_rx_mode entry point is called whenever the unicast or multicast 2726 * Writes multicast address list to the MTA hash table.
2535 * address lists or the network interface flags are updated. This routine is 2727 * Returns: -ENOMEM on failure
2536 * responsible for configuring the hardware for proper unicast, multicast, 2728 * 0 on no addresses written
2537 * promiscuous mode, and all-multi behavior. 2729 * X on writing X addresses to MTA
2538 **/ 2730 **/
2539static void igb_set_rx_mode(struct net_device *netdev) 2731static int igb_write_mc_addr_list(struct net_device *netdev)
2540{ 2732{
2541 struct igb_adapter *adapter = netdev_priv(netdev); 2733 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw; 2734 struct e1000_hw *hw = &adapter->hw;
2543 unsigned int rar_entries = hw->mac.rar_entry_count -
2544 (adapter->vfs_allocated_count + 1);
2545 struct dev_mc_list *mc_ptr = netdev->mc_list; 2735 struct dev_mc_list *mc_ptr = netdev->mc_list;
2546 u8 *mta_list = NULL; 2736 u8 *mta_list;
2547 u32 rctl; 2737 u32 vmolr = 0;
2548 int i; 2738 int i;
2549 2739
2550 /* Check for Promiscuous and All Multicast modes */ 2740 if (!netdev->mc_count) {
2551 rctl = rd32(E1000_RCTL); 2741 /* nothing to program, so clear mc list */
2742 igb_update_mc_addr_list(hw, NULL, 0);
2743 igb_restore_vf_multicasts(adapter);
2744 return 0;
2745 }
2552 2746
2553 if (netdev->flags & IFF_PROMISC) { 2747 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2554 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2748 if (!mta_list)
2555 rctl &= ~E1000_RCTL_VFE; 2749 return -ENOMEM;
2556 } else {
2557 if (netdev->flags & IFF_ALLMULTI)
2558 rctl |= E1000_RCTL_MPE;
2559 else
2560 rctl &= ~E1000_RCTL_MPE;
2561 2750
2562 if (netdev->uc.count > rar_entries) 2751 /* set vmolr receive overflow multicast bit */
2563 rctl |= E1000_RCTL_UPE; 2752 vmolr |= E1000_VMOLR_ROMPE;
2564 else 2753
2565 rctl &= ~E1000_RCTL_UPE; 2754 /* The shared function expects a packed array of only addresses. */
2566 rctl |= E1000_RCTL_VFE; 2755 mc_ptr = netdev->mc_list;
2756
2757 for (i = 0; i < netdev->mc_count; i++) {
2758 if (!mc_ptr)
2759 break;
2760 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2761 mc_ptr = mc_ptr->next;
2567 } 2762 }
2568 wr32(E1000_RCTL, rctl); 2763 igb_update_mc_addr_list(hw, mta_list, i);
2764 kfree(mta_list);
2765
2766 return netdev->mc_count;
2767}
2768
2769/**
2770 * igb_write_uc_addr_list - write unicast addresses to RAR table
2771 * @netdev: network interface device structure
2772 *
2773 * Writes unicast address list to the RAR table.
2774 * Returns: -ENOMEM on failure/insufficient address space
2775 * 0 on no addresses written
2776 * X on writing X addresses to the RAR table
2777 **/
2778static int igb_write_uc_addr_list(struct net_device *netdev)
2779{
2780 struct igb_adapter *adapter = netdev_priv(netdev);
2781 struct e1000_hw *hw = &adapter->hw;
2782 unsigned int vfn = adapter->vfs_allocated_count;
2783 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2784 int count = 0;
2785
2786 /* return ENOMEM indicating insufficient memory for addresses */
2787 if (netdev->uc.count > rar_entries)
2788 return -ENOMEM;
2569 2789
2570 if (netdev->uc.count && rar_entries) { 2790 if (netdev->uc.count && rar_entries) {
2571 struct netdev_hw_addr *ha; 2791 struct netdev_hw_addr *ha;
2572 list_for_each_entry(ha, &netdev->uc.list, list) { 2792 list_for_each_entry(ha, &netdev->uc.list, list) {
2573 if (!rar_entries) 2793 if (!rar_entries)
2574 break; 2794 break;
2575 igb_rar_set(hw, ha->addr, rar_entries); 2795 igb_rar_set_qsel(adapter, ha->addr,
2576 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 2796 rar_entries--,
2577 rar_entries); 2797 vfn);
2578 rar_entries--; 2798 count++;
2579 } 2799 }
2580 } 2800 }
2581 /* write the addresses in reverse order to avoid write combining */ 2801 /* write the addresses in reverse order to avoid write combining */
@@ -2585,29 +2805,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
2585 } 2805 }
2586 wrfl(); 2806 wrfl();
2587 2807
2588 if (!netdev->mc_count) { 2808 return count;
2589 /* nothing to program, so clear mc list */ 2809}
2590 igb_update_mc_addr_list(hw, NULL, 0); 2810
2591 igb_restore_vf_multicasts(adapter); 2811/**
2592 return; 2812 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2813 * @netdev: network interface device structure
2814 *
2815 * The set_rx_mode entry point is called whenever the unicast or multicast
2816 * address lists or the network interface flags are updated. This routine is
2817 * responsible for configuring the hardware for proper unicast, multicast,
2818 * promiscuous mode, and all-multi behavior.
2819 **/
2820static void igb_set_rx_mode(struct net_device *netdev)
2821{
2822 struct igb_adapter *adapter = netdev_priv(netdev);
2823 struct e1000_hw *hw = &adapter->hw;
2824 unsigned int vfn = adapter->vfs_allocated_count;
2825 u32 rctl, vmolr = 0;
2826 int count;
2827
2828 /* Check for Promiscuous and All Multicast modes */
2829 rctl = rd32(E1000_RCTL);
2830
2831 /* clear the affected bits */
2832 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2833
2834 if (netdev->flags & IFF_PROMISC) {
2835 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2836 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2837 } else {
2838 if (netdev->flags & IFF_ALLMULTI) {
2839 rctl |= E1000_RCTL_MPE;
2840 vmolr |= E1000_VMOLR_MPME;
2841 } else {
2842 /*
2843 * Write addresses to the MTA; if the attempt fails
2844 * then we should just turn on promiscuous mode so
2845 * that we can at least receive multicast traffic
2846 */
2847 count = igb_write_mc_addr_list(netdev);
2848 if (count < 0) {
2849 rctl |= E1000_RCTL_MPE;
2850 vmolr |= E1000_VMOLR_MPME;
2851 } else if (count) {
2852 vmolr |= E1000_VMOLR_ROMPE;
2853 }
2854 }
2855 /*
2856 * Write addresses to available RAR registers; if there is not
2857 * sufficient space to store all the addresses then enable
2858 * unicast promiscuous mode
2859 */
2860 count = igb_write_uc_addr_list(netdev);
2861 if (count < 0) {
2862 rctl |= E1000_RCTL_UPE;
2863 vmolr |= E1000_VMOLR_ROPE;
2864 }
2865 rctl |= E1000_RCTL_VFE;
2593 } 2866 }
2867 wr32(E1000_RCTL, rctl);
2594 2868
2595 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); 2869 /*
2596 if (!mta_list) { 2870 * In order to support SR-IOV and eventually VMDq it is necessary to set
2597 dev_err(&adapter->pdev->dev, 2871 * the VMOLR to enable the appropriate modes. Without this workaround
2598 "failed to allocate multicast filter list\n"); 2872 * we will have issues with VLAN tag stripping not being done for frames
2873 * that are only arriving because we are the default pool
2874 */
2875 if (hw->mac.type < e1000_82576)
2599 return; 2876 return;
2600 }
2601 2877
2602 /* The shared function expects a packed array of only addresses. */ 2878 vmolr |= rd32(E1000_VMOLR(vfn)) &
2603 for (i = 0; i < netdev->mc_count; i++) { 2879 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2604 if (!mc_ptr) 2880 wr32(E1000_VMOLR(vfn), vmolr);
2605 break;
2606 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2607 mc_ptr = mc_ptr->next;
2608 }
2609 igb_update_mc_addr_list(hw, mta_list, i);
2610 kfree(mta_list);
2611 igb_restore_vf_multicasts(adapter); 2881 igb_restore_vf_multicasts(adapter);
2612} 2882}
2613 2883
@@ -2669,37 +2939,33 @@ static void igb_watchdog(unsigned long data)
2669static void igb_watchdog_task(struct work_struct *work) 2939static void igb_watchdog_task(struct work_struct *work)
2670{ 2940{
2671 struct igb_adapter *adapter = container_of(work, 2941 struct igb_adapter *adapter = container_of(work,
2672 struct igb_adapter, watchdog_task); 2942 struct igb_adapter,
2943 watchdog_task);
2673 struct e1000_hw *hw = &adapter->hw; 2944 struct e1000_hw *hw = &adapter->hw;
2674 struct net_device *netdev = adapter->netdev; 2945 struct net_device *netdev = adapter->netdev;
2675 struct igb_ring *tx_ring = adapter->tx_ring;
2676 u32 link; 2946 u32 link;
2677 u32 eics = 0;
2678 int i; 2947 int i;
2679 2948
2680 link = igb_has_link(adapter); 2949 link = igb_has_link(adapter);
2681 if ((netif_carrier_ok(netdev)) && link)
2682 goto link_up;
2683
2684 if (link) { 2950 if (link) {
2685 if (!netif_carrier_ok(netdev)) { 2951 if (!netif_carrier_ok(netdev)) {
2686 u32 ctrl; 2952 u32 ctrl;
2687 hw->mac.ops.get_speed_and_duplex(&adapter->hw, 2953 hw->mac.ops.get_speed_and_duplex(hw,
2688 &adapter->link_speed, 2954 &adapter->link_speed,
2689 &adapter->link_duplex); 2955 &adapter->link_duplex);
2690 2956
2691 ctrl = rd32(E1000_CTRL); 2957 ctrl = rd32(E1000_CTRL);
2692 /* Links status message must follow this format */ 2958 /* Links status message must follow this format */
2693 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " 2959 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2694 "Flow Control: %s\n", 2960 "Flow Control: %s\n",
2695 netdev->name, 2961 netdev->name,
2696 adapter->link_speed, 2962 adapter->link_speed,
2697 adapter->link_duplex == FULL_DUPLEX ? 2963 adapter->link_duplex == FULL_DUPLEX ?
2698 "Full Duplex" : "Half Duplex", 2964 "Full Duplex" : "Half Duplex",
2699 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2965 ((ctrl & E1000_CTRL_TFCE) &&
2700 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2966 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
2701 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2967 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2702 E1000_CTRL_TFCE) ? "TX" : "None"))); 2968 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2703 2969
2704 /* tweak tx_queue_len according to speed/duplex and 2970 /* tweak tx_queue_len according to speed/duplex and
2705 * adjust the timeout factor */ 2971 * adjust the timeout factor */
@@ -2743,46 +3009,40 @@ static void igb_watchdog_task(struct work_struct *work)
2743 } 3009 }
2744 } 3010 }
2745 3011
2746link_up:
2747 igb_update_stats(adapter); 3012 igb_update_stats(adapter);
3013 igb_update_adaptive(hw);
2748 3014
2749 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 3015 for (i = 0; i < adapter->num_tx_queues; i++) {
2750 adapter->tpt_old = adapter->stats.tpt; 3016 struct igb_ring *tx_ring = &adapter->tx_ring[i];
2751 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; 3017 if (!netif_carrier_ok(netdev)) {
2752 adapter->colc_old = adapter->stats.colc;
2753
2754 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2755 adapter->gorc_old = adapter->stats.gorc;
2756 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2757 adapter->gotc_old = adapter->stats.gotc;
2758
2759 igb_update_adaptive(&adapter->hw);
2760
2761 if (!netif_carrier_ok(netdev)) {
2762 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2763 /* We've lost link, so the controller stops DMA, 3018 /* We've lost link, so the controller stops DMA,
2764 * but we've got queued Tx work that's never going 3019 * but we've got queued Tx work that's never going
2765 * to get done, so reset controller to flush Tx. 3020 * to get done, so reset controller to flush Tx.
2766 * (Do the reset outside of interrupt context). */ 3021 * (Do the reset outside of interrupt context). */
2767 adapter->tx_timeout_count++; 3022 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2768 schedule_work(&adapter->reset_task); 3023 adapter->tx_timeout_count++;
2769 /* return immediately since reset is imminent */ 3024 schedule_work(&adapter->reset_task);
2770 return; 3025 /* return immediately since reset is imminent */
3026 return;
3027 }
2771 } 3028 }
3029
3030 /* Force detection of hung controller every watchdog period */
3031 tx_ring->detect_tx_hung = true;
2772 } 3032 }
2773 3033
2774 /* Cause software interrupt to ensure rx ring is cleaned */ 3034 /* Cause software interrupt to ensure rx ring is cleaned */
2775 if (adapter->msix_entries) { 3035 if (adapter->msix_entries) {
2776 for (i = 0; i < adapter->num_rx_queues; i++) 3036 u32 eics = 0;
2777 eics |= adapter->rx_ring[i].eims_value; 3037 for (i = 0; i < adapter->num_q_vectors; i++) {
3038 struct igb_q_vector *q_vector = adapter->q_vector[i];
3039 eics |= q_vector->eims_value;
3040 }
2778 wr32(E1000_EICS, eics); 3041 wr32(E1000_EICS, eics);
2779 } else { 3042 } else {
2780 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3043 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2781 } 3044 }
2782 3045
2783 /* Force detection of hung controller every watchdog period */
2784 tx_ring->detect_tx_hung = true;
2785
2786 /* Reset the timer */ 3046 /* Reset the timer */
2787 if (!test_bit(__IGB_DOWN, &adapter->state)) 3047 if (!test_bit(__IGB_DOWN, &adapter->state))
2788 mod_timer(&adapter->watchdog_timer, 3048 mod_timer(&adapter->watchdog_timer,
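In MSI-X mode the reworked watchdog now collects one EIMS bit per q_vector and kicks them all with a single EICS write (legacy interrupt mode still falls back to a plain ICS write). A trimmed sketch of that aggregation, with a stub type standing in for struct igb_q_vector:

#include <stdint.h>

struct q_vector_stub { uint32_t eims_value; };	/* stand-in for igb_q_vector */

static uint32_t build_eics(struct q_vector_stub * const *vectors, int num)
{
	uint32_t eics = 0;
	int i;

	for (i = 0; i < num; i++)
		eics |= vectors[i]->eims_value;	/* one MSI-X cause bit each */
	return eics;				/* value written to E1000_EICS */
}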
@@ -2796,7 +3056,6 @@ enum latency_range {
2796 latency_invalid = 255 3056 latency_invalid = 255
2797}; 3057};
2798 3058
2799
2800/** 3059/**
2801 * igb_update_ring_itr - update the dynamic ITR value based on packet size 3060 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2802 * 3061 *
@@ -2811,25 +3070,37 @@ enum latency_range {
2811 * parameter (see igb_param.c) 3070 * parameter (see igb_param.c)
2812 * NOTE: This function is called only when operating in a multiqueue 3071 * NOTE: This function is called only when operating in a multiqueue
2813 * receive environment. 3072 * receive environment.
2814 * @rx_ring: pointer to ring 3073 * @q_vector: pointer to q_vector
2815 **/ 3074 **/
2816static void igb_update_ring_itr(struct igb_ring *rx_ring) 3075static void igb_update_ring_itr(struct igb_q_vector *q_vector)
2817{ 3076{
2818 int new_val = rx_ring->itr_val; 3077 int new_val = q_vector->itr_val;
2819 int avg_wire_size = 0; 3078 int avg_wire_size = 0;
2820 struct igb_adapter *adapter = rx_ring->adapter; 3079 struct igb_adapter *adapter = q_vector->adapter;
2821
2822 if (!rx_ring->total_packets)
2823 goto clear_counts; /* no packets, so don't do anything */
2824 3080
2825 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3081 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2826 * ints/sec - ITR timer value of 120 ticks. 3082 * ints/sec - ITR timer value of 120 ticks.
2827 */ 3083 */
2828 if (adapter->link_speed != SPEED_1000) { 3084 if (adapter->link_speed != SPEED_1000) {
2829 new_val = 120; 3085 new_val = 976;
2830 goto set_itr_val; 3086 goto set_itr_val;
2831 } 3087 }
2832 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; 3088
3089 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3090 struct igb_ring *ring = q_vector->rx_ring;
3091 avg_wire_size = ring->total_bytes / ring->total_packets;
3092 }
3093
3094 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3095 struct igb_ring *ring = q_vector->tx_ring;
3096 avg_wire_size = max_t(u32, avg_wire_size,
3097 (ring->total_bytes /
3098 ring->total_packets));
3099 }
3100
3101 /* if avg_wire_size isn't set no work was done */
3102 if (!avg_wire_size)
3103 goto clear_counts;
2833 3104
2834 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3105 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2835 avg_wire_size += 24; 3106 avg_wire_size += 24;
@@ -2844,13 +3115,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
2844 new_val = avg_wire_size / 2; 3115 new_val = avg_wire_size / 2;
2845 3116
2846set_itr_val: 3117set_itr_val:
2847 if (new_val != rx_ring->itr_val) { 3118 if (new_val != q_vector->itr_val) {
2848 rx_ring->itr_val = new_val; 3119 q_vector->itr_val = new_val;
2849 rx_ring->set_itr = 1; 3120 q_vector->set_itr = 1;
2850 } 3121 }
2851clear_counts: 3122clear_counts:
2852 rx_ring->total_bytes = 0; 3123 if (q_vector->rx_ring) {
2853 rx_ring->total_packets = 0; 3124 q_vector->rx_ring->total_bytes = 0;
3125 q_vector->rx_ring->total_packets = 0;
3126 }
3127 if (q_vector->tx_ring) {
3128 q_vector->tx_ring->total_bytes = 0;
3129 q_vector->tx_ring->total_packets = 0;
3130 }
2854} 3131}
2855 3132
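The reworked igb_update_ring_itr() now derives the next interval from whichever of the vector's rx or tx rings saw the larger average packet size, then adds 24 bytes of wire overhead before mapping it to an ITR value. A userspace sketch of that heuristic; the final divisor is simplified here, the driver applies additional banding not shown in this hunk:

#include <stdint.h>

struct ring_totals { uint64_t bytes; uint64_t packets; };

static unsigned int next_itr(const struct ring_totals *rx,
			     const struct ring_totals *tx,
			     unsigned int current_itr)
{
	unsigned int avg_wire_size = 0;

	if (rx && rx->packets)
		avg_wire_size = rx->bytes / rx->packets;
	if (tx && tx->packets) {
		unsigned int tx_avg = tx->bytes / tx->packets;
		if (tx_avg > avg_wire_size)
			avg_wire_size = tx_avg;
	}
	if (!avg_wire_size)		/* no work was done, keep old value */
		return current_itr;

	avg_wire_size += 24;		/* CRC + preamble + inter-frame gap */
	return avg_wire_size / 2;	/* simplified mapping to an interval */
}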
2856/** 3133/**
@@ -2867,7 +3144,7 @@ clear_counts:
2867 * NOTE: These calculations are only valid when operating in a single- 3144 * NOTE: These calculations are only valid when operating in a single-
2868 * queue environment. 3145 * queue environment.
2869 * @adapter: pointer to adapter 3146 * @adapter: pointer to adapter
2870 * @itr_setting: current adapter->itr 3147 * @itr_setting: current q_vector->itr_val
2871 * @packets: the number of packets during this measurement interval 3148 * @packets: the number of packets during this measurement interval
2872 * @bytes: the number of bytes during this measurement interval 3149 * @bytes: the number of bytes during this measurement interval
2873 **/ 3150 **/
@@ -2919,8 +3196,9 @@ update_itr_done:
2919 3196
2920static void igb_set_itr(struct igb_adapter *adapter) 3197static void igb_set_itr(struct igb_adapter *adapter)
2921{ 3198{
3199 struct igb_q_vector *q_vector = adapter->q_vector[0];
2922 u16 current_itr; 3200 u16 current_itr;
2923 u32 new_itr = adapter->itr; 3201 u32 new_itr = q_vector->itr_val;
2924 3202
2925 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3203 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2926 if (adapter->link_speed != SPEED_1000) { 3204 if (adapter->link_speed != SPEED_1000) {
@@ -2934,18 +3212,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
2934 adapter->rx_ring->total_packets, 3212 adapter->rx_ring->total_packets,
2935 adapter->rx_ring->total_bytes); 3213 adapter->rx_ring->total_bytes);
2936 3214
2937 if (adapter->rx_ring->buddy) { 3215 adapter->tx_itr = igb_update_itr(adapter,
2938 adapter->tx_itr = igb_update_itr(adapter, 3216 adapter->tx_itr,
2939 adapter->tx_itr, 3217 adapter->tx_ring->total_packets,
2940 adapter->tx_ring->total_packets, 3218 adapter->tx_ring->total_bytes);
2941 adapter->tx_ring->total_bytes); 3219 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2942 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2943 } else {
2944 current_itr = adapter->rx_itr;
2945 }
2946 3220
2947 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3221 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2948 if (adapter->itr_setting == 3 && current_itr == lowest_latency) 3222 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
2949 current_itr = low_latency; 3223 current_itr = low_latency;
2950 3224
2951 switch (current_itr) { 3225 switch (current_itr) {
@@ -2966,18 +3240,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
2966set_itr_now: 3240set_itr_now:
2967 adapter->rx_ring->total_bytes = 0; 3241 adapter->rx_ring->total_bytes = 0;
2968 adapter->rx_ring->total_packets = 0; 3242 adapter->rx_ring->total_packets = 0;
2969 if (adapter->rx_ring->buddy) { 3243 adapter->tx_ring->total_bytes = 0;
2970 adapter->rx_ring->buddy->total_bytes = 0; 3244 adapter->tx_ring->total_packets = 0;
2971 adapter->rx_ring->buddy->total_packets = 0;
2972 }
2973 3245
2974 if (new_itr != adapter->itr) { 3246 if (new_itr != q_vector->itr_val) {
2975 /* this attempts to bias the interrupt rate towards Bulk 3247 /* this attempts to bias the interrupt rate towards Bulk
2976 * by adding intermediate steps when interrupt rate is 3248 * by adding intermediate steps when interrupt rate is
2977 * increasing */ 3249 * increasing */
2978 new_itr = new_itr > adapter->itr ? 3250 new_itr = new_itr > q_vector->itr_val ?
2979 max((new_itr * adapter->itr) / 3251 max((new_itr * q_vector->itr_val) /
2980 (new_itr + (adapter->itr >> 2)), new_itr) : 3252 (new_itr + (q_vector->itr_val >> 2)),
3253 new_itr) :
2981 new_itr; 3254 new_itr;
2982 /* Don't write the value here; it resets the adapter's 3255 /* Don't write the value here; it resets the adapter's
2983 * internal timer, and causes us to delay far longer than 3256 * internal timer, and causes us to delay far longer than
@@ -2985,25 +3258,22 @@ set_itr_now:
2985 * value at the beginning of the next interrupt so the timing 3258 * value at the beginning of the next interrupt so the timing
2986 * ends up being correct. 3259 * ends up being correct.
2987 */ 3260 */
2988 adapter->itr = new_itr; 3261 q_vector->itr_val = new_itr;
2989 adapter->rx_ring->itr_val = new_itr; 3262 q_vector->set_itr = 1;
2990 adapter->rx_ring->set_itr = 1;
2991 } 3263 }
2992 3264
2993 return; 3265 return;
2994} 3266}
2995 3267
2996
2997#define IGB_TX_FLAGS_CSUM 0x00000001 3268#define IGB_TX_FLAGS_CSUM 0x00000001
2998#define IGB_TX_FLAGS_VLAN 0x00000002 3269#define IGB_TX_FLAGS_VLAN 0x00000002
2999#define IGB_TX_FLAGS_TSO 0x00000004 3270#define IGB_TX_FLAGS_TSO 0x00000004
3000#define IGB_TX_FLAGS_IPV4 0x00000008 3271#define IGB_TX_FLAGS_IPV4 0x00000008
3001#define IGB_TX_FLAGS_TSTAMP 0x00000010 3272#define IGB_TX_FLAGS_TSTAMP 0x00000010
3002#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 3273#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3003#define IGB_TX_FLAGS_VLAN_SHIFT 16 3274#define IGB_TX_FLAGS_VLAN_SHIFT 16
3004 3275
3005static inline int igb_tso_adv(struct igb_adapter *adapter, 3276static inline int igb_tso_adv(struct igb_ring *tx_ring,
3006 struct igb_ring *tx_ring,
3007 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 3277 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3008{ 3278{
3009 struct e1000_adv_tx_context_desc *context_desc; 3279 struct e1000_adv_tx_context_desc *context_desc;
@@ -3065,8 +3335,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3065 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 3335 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3066 3336
3067 /* For 82575, context index must be unique per ring. */ 3337 /* For 82575, context index must be unique per ring. */
3068 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3338 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3069 mss_l4len_idx |= tx_ring->queue_index << 4; 3339 mss_l4len_idx |= tx_ring->reg_idx << 4;
3070 3340
3071 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3341 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3072 context_desc->seqnum_seed = 0; 3342 context_desc->seqnum_seed = 0;
@@ -3083,14 +3353,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3083 return true; 3353 return true;
3084} 3354}
3085 3355
3086static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, 3356static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3087 struct igb_ring *tx_ring, 3357 struct sk_buff *skb, u32 tx_flags)
3088 struct sk_buff *skb, u32 tx_flags)
3089{ 3358{
3090 struct e1000_adv_tx_context_desc *context_desc; 3359 struct e1000_adv_tx_context_desc *context_desc;
3091 unsigned int i; 3360 struct pci_dev *pdev = tx_ring->pdev;
3092 struct igb_buffer *buffer_info; 3361 struct igb_buffer *buffer_info;
3093 u32 info = 0, tu_cmd = 0; 3362 u32 info = 0, tu_cmd = 0;
3363 unsigned int i;
3094 3364
3095 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 3365 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3096 (tx_flags & IGB_TX_FLAGS_VLAN)) { 3366 (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3100,6 +3370,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3100 3370
3101 if (tx_flags & IGB_TX_FLAGS_VLAN) 3371 if (tx_flags & IGB_TX_FLAGS_VLAN)
3102 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); 3372 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3373
3103 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 3374 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3104 if (skb->ip_summed == CHECKSUM_PARTIAL) 3375 if (skb->ip_summed == CHECKSUM_PARTIAL)
3105 info |= skb_network_header_len(skb); 3376 info |= skb_network_header_len(skb);
@@ -3137,7 +3408,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3137 break; 3408 break;
3138 default: 3409 default:
3139 if (unlikely(net_ratelimit())) 3410 if (unlikely(net_ratelimit()))
3140 dev_warn(&adapter->pdev->dev, 3411 dev_warn(&pdev->dev,
3141 "partial checksum but proto=%x!\n", 3412 "partial checksum but proto=%x!\n",
3142 skb->protocol); 3413 skb->protocol);
3143 break; 3414 break;
@@ -3146,11 +3417,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3146 3417
3147 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 3418 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3148 context_desc->seqnum_seed = 0; 3419 context_desc->seqnum_seed = 0;
3149 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3420 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3150 context_desc->mss_l4len_idx = 3421 context_desc->mss_l4len_idx =
3151 cpu_to_le32(tx_ring->queue_index << 4); 3422 cpu_to_le32(tx_ring->reg_idx << 4);
3152 else
3153 context_desc->mss_l4len_idx = 0;
3154 3423
3155 buffer_info->time_stamp = jiffies; 3424 buffer_info->time_stamp = jiffies;
3156 buffer_info->next_to_watch = i; 3425 buffer_info->next_to_watch = i;
@@ -3169,11 +3438,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3169#define IGB_MAX_TXD_PWR 16 3438#define IGB_MAX_TXD_PWR 16
3170#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 3439#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3171 3440
3172static inline int igb_tx_map_adv(struct igb_adapter *adapter, 3441static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3173 struct igb_ring *tx_ring, struct sk_buff *skb,
3174 unsigned int first) 3442 unsigned int first)
3175{ 3443{
3176 struct igb_buffer *buffer_info; 3444 struct igb_buffer *buffer_info;
3445 struct pci_dev *pdev = tx_ring->pdev;
3177 unsigned int len = skb_headlen(skb); 3446 unsigned int len = skb_headlen(skb);
3178 unsigned int count = 0, i; 3447 unsigned int count = 0, i;
3179 unsigned int f; 3448 unsigned int f;
@@ -3181,8 +3450,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3181 3450
3182 i = tx_ring->next_to_use; 3451 i = tx_ring->next_to_use;
3183 3452
3184 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 3453 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3185 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 3454 dev_err(&pdev->dev, "TX DMA map failed\n");
3186 return 0; 3455 return 0;
3187 } 3456 }
3188 3457
@@ -3218,18 +3487,17 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3218 tx_ring->buffer_info[i].skb = skb; 3487 tx_ring->buffer_info[i].skb = skb;
3219 tx_ring->buffer_info[first].next_to_watch = i; 3488 tx_ring->buffer_info[first].next_to_watch = i;
3220 3489
3221 return count + 1; 3490 return ++count;
3222} 3491}
3223 3492
3224static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3493static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3225 struct igb_ring *tx_ring,
3226 int tx_flags, int count, u32 paylen, 3494 int tx_flags, int count, u32 paylen,
3227 u8 hdr_len) 3495 u8 hdr_len)
3228{ 3496{
3229 union e1000_adv_tx_desc *tx_desc = NULL; 3497 union e1000_adv_tx_desc *tx_desc;
3230 struct igb_buffer *buffer_info; 3498 struct igb_buffer *buffer_info;
3231 u32 olinfo_status = 0, cmd_type_len; 3499 u32 olinfo_status = 0, cmd_type_len;
3232 unsigned int i; 3500 unsigned int i = tx_ring->next_to_use;
3233 3501
3234 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 3502 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3235 E1000_ADVTXD_DCMD_DEXT); 3503 E1000_ADVTXD_DCMD_DEXT);
@@ -3254,27 +3522,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3254 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3522 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3255 } 3523 }
3256 3524
3257 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && 3525 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3258 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 3526 (tx_flags & (IGB_TX_FLAGS_CSUM |
3527 IGB_TX_FLAGS_TSO |
3259 IGB_TX_FLAGS_VLAN))) 3528 IGB_TX_FLAGS_VLAN)))
3260 olinfo_status |= tx_ring->queue_index << 4; 3529 olinfo_status |= tx_ring->reg_idx << 4;
3261 3530
3262 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 3531 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3263 3532
3264 i = tx_ring->next_to_use; 3533 do {
3265 while (count--) {
3266 buffer_info = &tx_ring->buffer_info[i]; 3534 buffer_info = &tx_ring->buffer_info[i];
3267 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 3535 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3268 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 3536 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3269 tx_desc->read.cmd_type_len = 3537 tx_desc->read.cmd_type_len =
3270 cpu_to_le32(cmd_type_len | buffer_info->length); 3538 cpu_to_le32(cmd_type_len | buffer_info->length);
3271 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3539 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3540 count--;
3272 i++; 3541 i++;
3273 if (i == tx_ring->count) 3542 if (i == tx_ring->count)
3274 i = 0; 3543 i = 0;
3275 } 3544 } while (count > 0);
3276 3545
3277 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 3546 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3278 /* Force memory writes to complete before letting h/w 3547 /* Force memory writes to complete before letting h/w
3279 * know there are new descriptors to fetch. (Only 3548 * know there are new descriptors to fetch. (Only
3280 * applicable for weak-ordered memory model archs, 3549 * applicable for weak-ordered memory model archs,
@@ -3282,16 +3551,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3282 wmb(); 3551 wmb();
3283 3552
3284 tx_ring->next_to_use = i; 3553 tx_ring->next_to_use = i;
3285 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3554 writel(i, tx_ring->tail);
3286 /* we need this if more than one processor can write to our tail 3555 /* we need this if more than one processor can write to our tail
3287 * at a time, it synchronizes IO on IA64/Altix systems */ 3556
3288 mmiowb(); 3557 mmiowb();
3289} 3558}
3290 3559
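The descriptor/tail handshake above follows the usual producer pattern: fill the descriptors in host memory, issue a write barrier, then advance next_to_use and write the doorbell so the hardware only ever fetches completed descriptors. A userspace sketch of that ordering, with an atomic fence standing in for wmb() and a plain pointer store for writel():

#include <stdatomic.h>
#include <stdint.h>

struct tx_ring_stub {
	unsigned int next_to_use;
	volatile uint32_t *tail;	/* MMIO doorbell in the real driver */
};

static void publish_descriptors(struct tx_ring_stub *ring, unsigned int new_index)
{
	/* descriptors for the new index range were written before this call */
	atomic_thread_fence(memory_order_release);	/* stands in for wmb() */
	ring->next_to_use = new_index;
	*ring->tail = new_index;	/* writel(i, tx_ring->tail) equivalent */
}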
3291static int __igb_maybe_stop_tx(struct net_device *netdev, 3560static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3292 struct igb_ring *tx_ring, int size)
3293{ 3561{
3294 struct igb_adapter *adapter = netdev_priv(netdev); 3562 struct net_device *netdev = tx_ring->netdev;
3295 3563
3296 netif_stop_subqueue(netdev, tx_ring->queue_index); 3564 netif_stop_subqueue(netdev, tx_ring->queue_index);
3297 3565
@@ -3307,66 +3575,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
3307 3575
3308 /* A reprieve! */ 3576 /* A reprieve! */
3309 netif_wake_subqueue(netdev, tx_ring->queue_index); 3577 netif_wake_subqueue(netdev, tx_ring->queue_index);
3310 ++adapter->restart_queue; 3578 tx_ring->tx_stats.restart_queue++;
3311 return 0; 3579 return 0;
3312} 3580}
3313 3581
3314static int igb_maybe_stop_tx(struct net_device *netdev, 3582static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3315 struct igb_ring *tx_ring, int size)
3316{ 3583{
3317 if (igb_desc_unused(tx_ring) >= size) 3584 if (igb_desc_unused(tx_ring) >= size)
3318 return 0; 3585 return 0;
3319 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3586 return __igb_maybe_stop_tx(tx_ring, size);
3320} 3587}
3321 3588
3322static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 3589netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3323 struct net_device *netdev, 3590 struct igb_ring *tx_ring)
3324 struct igb_ring *tx_ring)
3325{ 3591{
3326 struct igb_adapter *adapter = netdev_priv(netdev); 3592 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3327 unsigned int first; 3593 unsigned int first;
3328 unsigned int tx_flags = 0; 3594 unsigned int tx_flags = 0;
3329 u8 hdr_len = 0; 3595 u8 hdr_len = 0;
3330 int count = 0; 3596 int tso = 0, count;
3331 int tso = 0; 3597 union skb_shared_tx *shtx = skb_tx(skb);
3332 union skb_shared_tx *shtx;
3333
3334 if (test_bit(__IGB_DOWN, &adapter->state)) {
3335 dev_kfree_skb_any(skb);
3336 return NETDEV_TX_OK;
3337 }
3338
3339 if (skb->len <= 0) {
3340 dev_kfree_skb_any(skb);
3341 return NETDEV_TX_OK;
3342 }
3343 3598
3344 /* need: 1 descriptor per page, 3599 /* need: 1 descriptor per page,
3345 * + 2 desc gap to keep tail from touching head, 3600 * + 2 desc gap to keep tail from touching head,
3346 * + 1 desc for skb->data, 3601 * + 1 desc for skb->data,
3347 * + 1 desc for context descriptor, 3602 * + 1 desc for context descriptor,
3348 * otherwise try next time */ 3603 * otherwise try next time */
3349 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3604 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3350 /* this is a hard error */ 3605 /* this is a hard error */
3351 return NETDEV_TX_BUSY; 3606 return NETDEV_TX_BUSY;
3352 } 3607 }
3353 3608
3354 /*
3355 * TODO: check that there currently is no other packet with
3356 * time stamping in the queue
3357 *
3358 * When doing time stamping, keep the connection to the socket
3359 * a while longer: it is still needed by skb_hwtstamp_tx(),
3360 * called either in igb_tx_hwtstamp() or by our caller when
3361 * doing software time stamping.
3362 */
3363 shtx = skb_tx(skb);
3364 if (unlikely(shtx->hardware)) { 3609 if (unlikely(shtx->hardware)) {
3365 shtx->in_progress = 1; 3610 shtx->in_progress = 1;
3366 tx_flags |= IGB_TX_FLAGS_TSTAMP; 3611 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3367 } 3612 }
3368 3613
3369 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3614 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3370 tx_flags |= IGB_TX_FLAGS_VLAN; 3615 tx_flags |= IGB_TX_FLAGS_VLAN;
3371 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3616 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3372 } 3617 }
@@ -3375,37 +3620,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3375 tx_flags |= IGB_TX_FLAGS_IPV4; 3620 tx_flags |= IGB_TX_FLAGS_IPV4;
3376 3621
3377 first = tx_ring->next_to_use; 3622 first = tx_ring->next_to_use;
3378 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3623 if (skb_is_gso(skb)) {
3379 &hdr_len) : 0; 3624 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3380 3625
3381 if (tso < 0) { 3626 if (tso < 0) {
3382 dev_kfree_skb_any(skb); 3627 dev_kfree_skb_any(skb);
3383 return NETDEV_TX_OK; 3628 return NETDEV_TX_OK;
3629 }
3384 } 3630 }
3385 3631
3386 if (tso) 3632 if (tso)
3387 tx_flags |= IGB_TX_FLAGS_TSO; 3633 tx_flags |= IGB_TX_FLAGS_TSO;
3388 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && 3634 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3389 (skb->ip_summed == CHECKSUM_PARTIAL)) 3635 (skb->ip_summed == CHECKSUM_PARTIAL))
3390 tx_flags |= IGB_TX_FLAGS_CSUM; 3636 tx_flags |= IGB_TX_FLAGS_CSUM;
3391 3637
3392 /* 3638 /*
3393 * count reflects descriptors mapped, if 0 then mapping error 3639 * count reflects descriptors mapped, if 0 or less then mapping error
3394 * has occurred and we need to rewind the descriptor queue 3640 * has occurred and we need to rewind the descriptor queue
3395 */ 3641 */
3396 count = igb_tx_map_adv(adapter, tx_ring, skb, first); 3642 count = igb_tx_map_adv(tx_ring, skb, first);
3397 3643 if (count <= 0) {
3398 if (count) {
3399 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3400 skb->len, hdr_len);
3401 /* Make sure there is space in the ring for the next send. */
3402 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3403 } else {
3404 dev_kfree_skb_any(skb); 3644 dev_kfree_skb_any(skb);
3405 tx_ring->buffer_info[first].time_stamp = 0; 3645 tx_ring->buffer_info[first].time_stamp = 0;
3406 tx_ring->next_to_use = first; 3646 tx_ring->next_to_use = first;
3647 return NETDEV_TX_OK;
3407 } 3648 }
3408 3649
3650 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3651
3652 /* Make sure there is space in the ring for the next send. */
3653 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3654
3409 return NETDEV_TX_OK; 3655 return NETDEV_TX_OK;
3410} 3656}
3411 3657
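The ring-space check at the top of the transmit path budgets for the worst case before touching the ring, exactly as the comment describes: one descriptor per page fragment, one for skb->data, one for an optional context descriptor, plus a two-descriptor gap so the tail never catches up with the head. The same arithmetic as a standalone helper (names are illustrative):

#include <stdbool.h>

static bool tx_ring_has_room(unsigned int unused_descs, unsigned int nr_frags)
{
	/* nr_frags + 1 (head) + 1 (context) + 2 (gap) */
	return unused_descs >= nr_frags + 4;
}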
@@ -3414,8 +3660,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3414{ 3660{
3415 struct igb_adapter *adapter = netdev_priv(netdev); 3661 struct igb_adapter *adapter = netdev_priv(netdev);
3416 struct igb_ring *tx_ring; 3662 struct igb_ring *tx_ring;
3417
3418 int r_idx = 0; 3663 int r_idx = 0;
3664
3665 if (test_bit(__IGB_DOWN, &adapter->state)) {
3666 dev_kfree_skb_any(skb);
3667 return NETDEV_TX_OK;
3668 }
3669
3670 if (skb->len <= 0) {
3671 dev_kfree_skb_any(skb);
3672 return NETDEV_TX_OK;
3673 }
3674
3419 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 3675 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3420 tx_ring = adapter->multi_tx_table[r_idx]; 3676 tx_ring = adapter->multi_tx_table[r_idx];
3421 3677
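The ring lookup above masks the skb's queue_mapping with IGB_ABS_MAX_TX_QUEUES - 1, which only works because that constant is a power of two; the AND is then a cheap modulo. Sketch with an assumed queue count:

#define ABS_MAX_TX_QUEUES 8u	/* assumed power-of-two value for illustration */

static unsigned int pick_tx_queue(unsigned int queue_mapping)
{
	return queue_mapping & (ABS_MAX_TX_QUEUES - 1);	/* same as % 8 here */
}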
@@ -3423,7 +3679,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3423 * to a flow. Right now, performance is impacted slightly negatively 3679 * to a flow. Right now, performance is impacted slightly negatively
3424 * if using multiple tx queues. If the stack breaks away from a 3680 * if using multiple tx queues. If the stack breaks away from a
3425 * single qdisc implementation, we can look at this again. */ 3681 * single qdisc implementation, we can look at this again. */
3426 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); 3682 return igb_xmit_frame_ring_adv(skb, tx_ring);
3427} 3683}
3428 3684
3429/** 3685/**
@@ -3437,6 +3693,7 @@ static void igb_tx_timeout(struct net_device *netdev)
3437 3693
3438 /* Do the reset outside of interrupt context */ 3694 /* Do the reset outside of interrupt context */
3439 adapter->tx_timeout_count++; 3695 adapter->tx_timeout_count++;
3696
3440 schedule_work(&adapter->reset_task); 3697 schedule_work(&adapter->reset_task);
3441 wr32(E1000_EICS, 3698 wr32(E1000_EICS,
3442 (adapter->eims_enable_mask & ~adapter->eims_other)); 3699 (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3459,10 +3716,8 @@ static void igb_reset_task(struct work_struct *work)
3459 **/ 3716 **/
3460static struct net_device_stats *igb_get_stats(struct net_device *netdev) 3717static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3461{ 3718{
3462 struct igb_adapter *adapter = netdev_priv(netdev);
3463
3464 /* only return the current stats */ 3719 /* only return the current stats */
3465 return &adapter->net_stats; 3720 return &netdev->stats;
3466} 3721}
3467 3722
3468/** 3723/**
@@ -3475,16 +3730,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3475static int igb_change_mtu(struct net_device *netdev, int new_mtu) 3730static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3476{ 3731{
3477 struct igb_adapter *adapter = netdev_priv(netdev); 3732 struct igb_adapter *adapter = netdev_priv(netdev);
3733 struct pci_dev *pdev = adapter->pdev;
3478 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3734 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3735 u32 rx_buffer_len, i;
3479 3736
3480 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3737 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3481 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3738 dev_err(&pdev->dev, "Invalid MTU setting\n");
3482 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3483 return -EINVAL; 3739 return -EINVAL;
3484 } 3740 }
3485 3741
3486 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3742 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3487 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3743 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3488 return -EINVAL; 3744 return -EINVAL;
3489 } 3745 }
3490 3746
@@ -3493,8 +3749,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3493 3749
3494 /* igb_down has a dependency on max_frame_size */ 3750 /* igb_down has a dependency on max_frame_size */
3495 adapter->max_frame_size = max_frame; 3751 adapter->max_frame_size = max_frame;
3496 if (netif_running(netdev))
3497 igb_down(adapter);
3498 3752
3499 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3753 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3500 * means we reserve 2 more, this pushes us to allocate from the next 3754 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3502,35 +3756,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3502 * i.e. RXBUFFER_2048 --> size-4096 slab 3756 * i.e. RXBUFFER_2048 --> size-4096 slab
3503 */ 3757 */
3504 3758
3505 if (max_frame <= IGB_RXBUFFER_256) 3759 if (max_frame <= IGB_RXBUFFER_1024)
3506 adapter->rx_buffer_len = IGB_RXBUFFER_256; 3760 rx_buffer_len = IGB_RXBUFFER_1024;
3507 else if (max_frame <= IGB_RXBUFFER_512) 3761 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3508 adapter->rx_buffer_len = IGB_RXBUFFER_512; 3762 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3509 else if (max_frame <= IGB_RXBUFFER_1024)
3510 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3511 else if (max_frame <= IGB_RXBUFFER_2048)
3512 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3513 else 3763 else
3514#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3764 rx_buffer_len = IGB_RXBUFFER_128;
3515 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3516#else
3517 adapter->rx_buffer_len = PAGE_SIZE / 2;
3518#endif
3519
3520 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3521 if (adapter->vfs_allocated_count &&
3522 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3523 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3524 3765
3525 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3766 if (netif_running(netdev))
3526 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3767 igb_down(adapter);
3527 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3528 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3529 3768
3530 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 3769 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3531 netdev->mtu, new_mtu); 3770 netdev->mtu, new_mtu);
3532 netdev->mtu = new_mtu; 3771 netdev->mtu = new_mtu;
3533 3772
3773 for (i = 0; i < adapter->num_rx_queues; i++)
3774 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3775
3534 if (netif_running(netdev)) 3776 if (netif_running(netdev))
3535 igb_up(adapter); 3777 igb_up(adapter);
3536 else 3778 else
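The MTU hunk above collapses the old ladder of buffer sizes into three cases: small frames share a 1K buffer, standard VLAN-sized frames get an exact-fit buffer, and anything larger drops to a small header-only buffer, presumably because jumbo frames take the page-based packet-split path. A sketch of the selection; the constants mirror the driver's names and the values here are assumptions:

#define RXBUFFER_128            128	/* header buffer, payload goes to pages */
#define RXBUFFER_1024           1024
#define MAX_ETHERNET_VLAN_SIZE  1522	/* assumed: 1500 MTU + 14 hdr + 4 VLAN + 4 FCS */

static unsigned int rx_buffer_len_for(unsigned int max_frame)
{
	if (max_frame <= RXBUFFER_1024)
		return RXBUFFER_1024;
	if (max_frame <= MAX_ETHERNET_VLAN_SIZE)
		return MAX_ETHERNET_VLAN_SIZE;
	return RXBUFFER_128;	/* jumbo: small header buffer, data in pages */
}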
@@ -3548,9 +3790,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3548 3790
3549void igb_update_stats(struct igb_adapter *adapter) 3791void igb_update_stats(struct igb_adapter *adapter)
3550{ 3792{
3793 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3551 struct e1000_hw *hw = &adapter->hw; 3794 struct e1000_hw *hw = &adapter->hw;
3552 struct pci_dev *pdev = adapter->pdev; 3795 struct pci_dev *pdev = adapter->pdev;
3796 u32 rnbc;
3553 u16 phy_tmp; 3797 u16 phy_tmp;
3798 int i;
3799 u64 bytes, packets;
3554 3800
3555#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3801#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3556 3802
@@ -3563,6 +3809,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3563 if (pci_channel_offline(pdev)) 3809 if (pci_channel_offline(pdev))
3564 return; 3810 return;
3565 3811
3812 bytes = 0;
3813 packets = 0;
3814 for (i = 0; i < adapter->num_rx_queues; i++) {
3815 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3816 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3817 net_stats->rx_fifo_errors += rqdpc_tmp;
3818 bytes += adapter->rx_ring[i].rx_stats.bytes;
3819 packets += adapter->rx_ring[i].rx_stats.packets;
3820 }
3821
3822 net_stats->rx_bytes = bytes;
3823 net_stats->rx_packets = packets;
3824
3825 bytes = 0;
3826 packets = 0;
3827 for (i = 0; i < adapter->num_tx_queues; i++) {
3828 bytes += adapter->tx_ring[i].tx_stats.bytes;
3829 packets += adapter->tx_ring[i].tx_stats.packets;
3830 }
3831 net_stats->tx_bytes = bytes;
3832 net_stats->tx_packets = packets;
3833
3834 /* read stats registers */
3566 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3835 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3567 adapter->stats.gprc += rd32(E1000_GPRC); 3836 adapter->stats.gprc += rd32(E1000_GPRC);
3568 adapter->stats.gorc += rd32(E1000_GORCL); 3837 adapter->stats.gorc += rd32(E1000_GORCL);
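The per-queue roll-up added at the top of igb_update_stats() sums the software byte/packet counters across the rings and folds each queue's RQDPC drop counter into rx_fifo_errors. A trimmed model of the receive side; the output structure is assumed to be zeroed by the caller:

#include <stdint.h>

struct ring_stats { uint64_t bytes, packets, drops; };
struct rx_rollup  { uint64_t rx_bytes, rx_packets, rx_fifo_errors; };

static void sum_rx_rings(const struct ring_stats *rings, int n,
			 struct rx_rollup *out)
{
	int i;

	for (i = 0; i < n; i++) {
		out->rx_bytes       += rings[i].bytes;
		out->rx_packets     += rings[i].packets;
		out->rx_fifo_errors += rings[i].drops;	/* per-queue RQDPC */
	}
}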
@@ -3595,7 +3864,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3595 adapter->stats.gptc += rd32(E1000_GPTC); 3864 adapter->stats.gptc += rd32(E1000_GPTC);
3596 adapter->stats.gotc += rd32(E1000_GOTCL); 3865 adapter->stats.gotc += rd32(E1000_GOTCL);
3597 rd32(E1000_GOTCH); /* clear GOTCL */ 3866 rd32(E1000_GOTCH); /* clear GOTCL */
3598 adapter->stats.rnbc += rd32(E1000_RNBC); 3867 rnbc = rd32(E1000_RNBC);
3868 adapter->stats.rnbc += rnbc;
3869 net_stats->rx_fifo_errors += rnbc;
3599 adapter->stats.ruc += rd32(E1000_RUC); 3870 adapter->stats.ruc += rd32(E1000_RUC);
3600 adapter->stats.rfc += rd32(E1000_RFC); 3871 adapter->stats.rfc += rd32(E1000_RFC);
3601 adapter->stats.rjc += rd32(E1000_RJC); 3872 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3614,7 +3885,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3614 adapter->stats.bptc += rd32(E1000_BPTC); 3885 adapter->stats.bptc += rd32(E1000_BPTC);
3615 3886
3616 /* used for adaptive IFS */ 3887 /* used for adaptive IFS */
3617
3618 hw->mac.tx_packet_delta = rd32(E1000_TPT); 3888 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3619 adapter->stats.tpt += hw->mac.tx_packet_delta; 3889 adapter->stats.tpt += hw->mac.tx_packet_delta;
3620 hw->mac.collision_delta = rd32(E1000_COLC); 3890 hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3637,56 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3637 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 3907 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3638 3908
3639 /* Fill out the OS statistics structure */ 3909 /* Fill out the OS statistics structure */
3640 adapter->net_stats.multicast = adapter->stats.mprc; 3910 net_stats->multicast = adapter->stats.mprc;
3641 adapter->net_stats.collisions = adapter->stats.colc; 3911 net_stats->collisions = adapter->stats.colc;
3642 3912
3643 /* Rx Errors */ 3913 /* Rx Errors */
3644 3914
3645 if (hw->mac.type != e1000_82575) {
3646 u32 rqdpc_tmp;
3647 u64 rqdpc_total = 0;
3648 int i;
3649 /* Read out drops stats per RX queue. Notice RQDPC (Receive
3650 * Queue Drop Packet Count) stats only gets incremented, if
3651 * the DROP_EN but it set (in the SRRCTL register for that
3652 * queue). If DROP_EN bit is NOT set, then the some what
3653 * equivalent count is stored in RNBC (not per queue basis).
3654 * Also note the drop count is due to lack of available
3655 * descriptors.
3656 */
3657 for (i = 0; i < adapter->num_rx_queues; i++) {
3658 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3659 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3660 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3661 }
3662 adapter->net_stats.rx_fifo_errors = rqdpc_total;
3663 }
3664
3665 /* Note RNBC (Receive No Buffers Count) is an not an exact
3666 * drop count as the hardware FIFO might save the day. Thats
3667 * one of the reason for saving it in rx_fifo_errors, as its
3668 * potentially not a true drop.
3669 */
3670 adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
3671
3672 /* RLEC on some newer hardware can be incorrect so build 3915 /* RLEC on some newer hardware can be incorrect so build
3673 * our own version based on RUC and ROC */ 3916 * our own version based on RUC and ROC */
3674 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3917 net_stats->rx_errors = adapter->stats.rxerrc +
3675 adapter->stats.crcerrs + adapter->stats.algnerrc + 3918 adapter->stats.crcerrs + adapter->stats.algnerrc +
3676 adapter->stats.ruc + adapter->stats.roc + 3919 adapter->stats.ruc + adapter->stats.roc +
3677 adapter->stats.cexterr; 3920 adapter->stats.cexterr;
3678 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 3921 net_stats->rx_length_errors = adapter->stats.ruc +
3679 adapter->stats.roc; 3922 adapter->stats.roc;
3680 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3923 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3681 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3924 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3682 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3925 net_stats->rx_missed_errors = adapter->stats.mpc;
3683 3926
3684 /* Tx Errors */ 3927 /* Tx Errors */
3685 adapter->net_stats.tx_errors = adapter->stats.ecol + 3928 net_stats->tx_errors = adapter->stats.ecol +
3686 adapter->stats.latecol; 3929 adapter->stats.latecol;
3687 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3930 net_stats->tx_aborted_errors = adapter->stats.ecol;
3688 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3931 net_stats->tx_window_errors = adapter->stats.latecol;
3689 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3932 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3690 3933
3691 /* Tx Dropped needs to be maintained elsewhere */ 3934 /* Tx Dropped needs to be maintained elsewhere */
3692 3935
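As the comment in the hunk notes, rx_errors is rebuilt from individual hardware counters because RLEC can be wrong on newer parts. The same sum as a standalone helper, with a stub counter structure:

#include <stdint.h>

struct hw_err_counters {
	uint64_t rxerrc, crcerrs, algnerrc, ruc, roc, cexterr;
};

static uint64_t rx_errors(const struct hw_err_counters *s)
{
	return s->rxerrc + s->crcerrs + s->algnerrc +
	       s->ruc + s->roc + s->cexterr;
}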
@@ -3707,14 +3950,12 @@ void igb_update_stats(struct igb_adapter *adapter)
3707 3950
3708static irqreturn_t igb_msix_other(int irq, void *data) 3951static irqreturn_t igb_msix_other(int irq, void *data)
3709{ 3952{
3710 struct net_device *netdev = data; 3953 struct igb_adapter *adapter = data;
3711 struct igb_adapter *adapter = netdev_priv(netdev);
3712 struct e1000_hw *hw = &adapter->hw; 3954 struct e1000_hw *hw = &adapter->hw;
3713 u32 icr = rd32(E1000_ICR); 3955 u32 icr = rd32(E1000_ICR);
3714
3715 /* reading ICR causes bit 31 of EICR to be cleared */ 3956 /* reading ICR causes bit 31 of EICR to be cleared */
3716 3957
3717 if(icr & E1000_ICR_DOUTSYNC) { 3958 if (icr & E1000_ICR_DOUTSYNC) {
3718 /* HW is reporting DMA is out of sync */ 3959 /* HW is reporting DMA is out of sync */
3719 adapter->stats.doosync++; 3960 adapter->stats.doosync++;
3720 } 3961 }
@@ -3730,125 +3971,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3730 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3971 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3731 } 3972 }
3732 3973
3733 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 3974 if (adapter->vfs_allocated_count)
3975 wr32(E1000_IMS, E1000_IMS_LSC |
3976 E1000_IMS_VMMB |
3977 E1000_IMS_DOUTSYNC);
3978 else
3979 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3734 wr32(E1000_EIMS, adapter->eims_other); 3980 wr32(E1000_EIMS, adapter->eims_other);
3735 3981
3736 return IRQ_HANDLED; 3982 return IRQ_HANDLED;
3737} 3983}
3738 3984
3739static irqreturn_t igb_msix_tx(int irq, void *data) 3985static void igb_write_itr(struct igb_q_vector *q_vector)
3740{ 3986{
3741 struct igb_ring *tx_ring = data; 3987 u32 itr_val = q_vector->itr_val & 0x7FFC;
3742 struct igb_adapter *adapter = tx_ring->adapter;
3743 struct e1000_hw *hw = &adapter->hw;
3744 3988
3745#ifdef CONFIG_IGB_DCA 3989 if (!q_vector->set_itr)
3746 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3990 return;
3747 igb_update_tx_dca(tx_ring);
3748#endif
3749 3991
3750 tx_ring->total_bytes = 0; 3992 if (!itr_val)
3751 tx_ring->total_packets = 0; 3993 itr_val = 0x4;
3752 3994
3753 /* auto mask will automatically reenable the interrupt when we write 3995 if (q_vector->itr_shift)
3754 * EICS */ 3996 itr_val |= itr_val << q_vector->itr_shift;
3755 if (!igb_clean_tx_irq(tx_ring))
3756 /* Ring was not completely cleaned, so fire another interrupt */
3757 wr32(E1000_EICS, tx_ring->eims_value);
3758 else 3997 else
3759 wr32(E1000_EIMS, tx_ring->eims_value); 3998 itr_val |= 0x8000000;
3760 3999
3761 return IRQ_HANDLED; 4000 writel(itr_val, q_vector->itr_register);
4001 q_vector->set_itr = 0;
3762} 4002}
3763 4003
3764static void igb_write_itr(struct igb_ring *ring) 4004static irqreturn_t igb_msix_ring(int irq, void *data)
3765{ 4005{
3766 struct e1000_hw *hw = &ring->adapter->hw; 4006 struct igb_q_vector *q_vector = data;
3767 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3768 switch (hw->mac.type) {
3769 case e1000_82576:
3770 wr32(ring->itr_register, ring->itr_val |
3771 0x80000000);
3772 break;
3773 default:
3774 wr32(ring->itr_register, ring->itr_val |
3775 (ring->itr_val << 16));
3776 break;
3777 }
3778 ring->set_itr = 0;
3779 }
3780}
3781 4007
3782static irqreturn_t igb_msix_rx(int irq, void *data) 4008 /* Write the ITR value calculated from the previous interrupt. */
3783{ 4009 igb_write_itr(q_vector);
3784 struct igb_ring *rx_ring = data;
3785 4010
3786 /* Write the ITR value calculated at the end of the 4011 napi_schedule(&q_vector->napi);
3787 * previous interrupt.
3788 */
3789
3790 igb_write_itr(rx_ring);
3791 4012
3792 if (napi_schedule_prep(&rx_ring->napi)) 4013 return IRQ_HANDLED;
3793 __napi_schedule(&rx_ring->napi);
3794
3795#ifdef CONFIG_IGB_DCA
3796 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3797 igb_update_rx_dca(rx_ring);
3798#endif
3799 return IRQ_HANDLED;
3800} 4014}
3801 4015
3802#ifdef CONFIG_IGB_DCA 4016#ifdef CONFIG_IGB_DCA
3803static void igb_update_rx_dca(struct igb_ring *rx_ring) 4017static void igb_update_dca(struct igb_q_vector *q_vector)
3804{ 4018{
3805 u32 dca_rxctrl; 4019 struct igb_adapter *adapter = q_vector->adapter;
3806 struct igb_adapter *adapter = rx_ring->adapter;
3807 struct e1000_hw *hw = &adapter->hw; 4020 struct e1000_hw *hw = &adapter->hw;
3808 int cpu = get_cpu(); 4021 int cpu = get_cpu();
3809 int q = rx_ring->reg_idx;
3810 4022
3811 if (rx_ring->cpu != cpu) { 4023 if (q_vector->cpu == cpu)
3812 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 4024 goto out_no_update;
3813 if (hw->mac.type == e1000_82576) { 4025
3814 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 4026 if (q_vector->tx_ring) {
3815 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << 4027 int q = q_vector->tx_ring->reg_idx;
3816 E1000_DCA_RXCTRL_CPUID_SHIFT; 4028 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4029 if (hw->mac.type == e1000_82575) {
4030 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4031 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3817 } else { 4032 } else {
4033 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4034 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4035 E1000_DCA_TXCTRL_CPUID_SHIFT;
4036 }
4037 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4038 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4039 }
4040 if (q_vector->rx_ring) {
4041 int q = q_vector->rx_ring->reg_idx;
4042 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4043 if (hw->mac.type == e1000_82575) {
3818 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 4044 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3819 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4045 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4046 } else {
4047 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4048 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4049 E1000_DCA_RXCTRL_CPUID_SHIFT;
3820 } 4050 }
3821 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 4051 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3822 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 4052 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3823 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; 4053 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3824 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); 4054 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3825 rx_ring->cpu = cpu;
3826 }
3827 put_cpu();
3828}
3829
3830static void igb_update_tx_dca(struct igb_ring *tx_ring)
3831{
3832 u32 dca_txctrl;
3833 struct igb_adapter *adapter = tx_ring->adapter;
3834 struct e1000_hw *hw = &adapter->hw;
3835 int cpu = get_cpu();
3836 int q = tx_ring->reg_idx;
3837
3838 if (tx_ring->cpu != cpu) {
3839 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3840 if (hw->mac.type == e1000_82576) {
3841 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3842 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3843 E1000_DCA_TXCTRL_CPUID_SHIFT;
3844 } else {
3845 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3846 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3847 }
3848 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3849 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3850 tx_ring->cpu = cpu;
3851 } 4055 }
4056 q_vector->cpu = cpu;
4057out_no_update:
3852 put_cpu(); 4058 put_cpu();
3853} 4059}
3854 4060
@@ -3863,13 +4069,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
3863 /* Always use CB2 mode, difference is masked in the CB driver. */ 4069 /* Always use CB2 mode, difference is masked in the CB driver. */
3864 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4070 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3865 4071
3866 for (i = 0; i < adapter->num_tx_queues; i++) { 4072 for (i = 0; i < adapter->num_q_vectors; i++) {
3867 adapter->tx_ring[i].cpu = -1; 4073 struct igb_q_vector *q_vector = adapter->q_vector[i];
3868 igb_update_tx_dca(&adapter->tx_ring[i]); 4074 q_vector->cpu = -1;
3869 } 4075 igb_update_dca(q_vector);
3870 for (i = 0; i < adapter->num_rx_queues; i++) {
3871 adapter->rx_ring[i].cpu = -1;
3872 igb_update_rx_dca(&adapter->rx_ring[i]);
3873 } 4076 }
3874} 4077}
3875 4078
@@ -3877,6 +4080,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3877{ 4080{
3878 struct net_device *netdev = dev_get_drvdata(dev); 4081 struct net_device *netdev = dev_get_drvdata(dev);
3879 struct igb_adapter *adapter = netdev_priv(netdev); 4082 struct igb_adapter *adapter = netdev_priv(netdev);
4083 struct pci_dev *pdev = adapter->pdev;
3880 struct e1000_hw *hw = &adapter->hw; 4084 struct e1000_hw *hw = &adapter->hw;
3881 unsigned long event = *(unsigned long *)data; 4085 unsigned long event = *(unsigned long *)data;
3882 4086
@@ -3885,12 +4089,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3885 /* if already enabled, don't do it again */ 4089 /* if already enabled, don't do it again */
3886 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4090 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3887 break; 4091 break;
3888 /* Always use CB2 mode, difference is masked
3889 * in the CB driver. */
3890 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3891 if (dca_add_requester(dev) == 0) { 4092 if (dca_add_requester(dev) == 0) {
3892 adapter->flags |= IGB_FLAG_DCA_ENABLED; 4093 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3893 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 4094 dev_info(&pdev->dev, "DCA enabled\n");
3894 igb_setup_dca(adapter); 4095 igb_setup_dca(adapter);
3895 break; 4096 break;
3896 } 4097 }
@@ -3898,9 +4099,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3898 case DCA_PROVIDER_REMOVE: 4099 case DCA_PROVIDER_REMOVE:
3899 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 4100 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3900 /* without this a class_device is left 4101 /* without this a class_device is left
3901 * hanging around in the sysfs model */ 4102 * hanging around in the sysfs model */
3902 dca_remove_requester(dev); 4103 dca_remove_requester(dev);
3903 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 4104 dev_info(&pdev->dev, "DCA disabled\n");
3904 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 4105 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3905 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 4106 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3906 } 4107 }
@@ -3930,12 +4131,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
3930 4131
3931 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 4132 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3932 ping = E1000_PF_CONTROL_MSG; 4133 ping = E1000_PF_CONTROL_MSG;
3933 if (adapter->vf_data[i].clear_to_send) 4134 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
3934 ping |= E1000_VT_MSGTYPE_CTS; 4135 ping |= E1000_VT_MSGTYPE_CTS;
3935 igb_write_mbx(hw, &ping, 1, i); 4136 igb_write_mbx(hw, &ping, 1, i);
3936 } 4137 }
3937} 4138}
3938 4139
4140static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4141{
4142 struct e1000_hw *hw = &adapter->hw;
4143 u32 vmolr = rd32(E1000_VMOLR(vf));
4144 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4145
4146 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4147 IGB_VF_FLAG_MULTI_PROMISC);
4148 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4149
4150 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4151 vmolr |= E1000_VMOLR_MPME;
4152 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4153 } else {
4154 /*
4155 * if we have hashes and we are clearing a multicast promisc
4156 * flag we need to write the hashes to the MTA as this step
4157 * was previously skipped
4158 */
4159 if (vf_data->num_vf_mc_hashes > 30) {
4160 vmolr |= E1000_VMOLR_MPME;
4161 } else if (vf_data->num_vf_mc_hashes) {
4162 int j;
4163 vmolr |= E1000_VMOLR_ROMPE;
4164 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4165 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4166 }
4167 }
4168
4169 wr32(E1000_VMOLR(vf), vmolr);
4170
4171 /* there are flags left unprocessed, likely not supported */
4172 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4173 return -EINVAL;
4174
4175 return 0;
4176
4177}
4178
3939static int igb_set_vf_multicasts(struct igb_adapter *adapter, 4179static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3940 u32 *msgbuf, u32 vf) 4180 u32 *msgbuf, u32 vf)
3941{ 4181{
@@ -3944,18 +4184,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3944 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4184 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3945 int i; 4185 int i;
3946 4186
3947 /* only up to 30 hash values supported */ 4187 /* salt away the number of multicast addresses assigned
3948 if (n > 30)
3949 n = 30;
3950
3951 /* salt away the number of multi cast addresses assigned
3952 * to this VF for later use to restore when the PF multi cast 4188 * to this VF for later use to restore when the PF multi cast
3953 * list changes 4189 * list changes
3954 */ 4190 */
3955 vf_data->num_vf_mc_hashes = n; 4191 vf_data->num_vf_mc_hashes = n;
3956 4192
3957 /* VFs are limited to using the MTA hash table for their multicast 4193 /* only up to 30 hash values supported */
3958 * addresses */ 4194 if (n > 30)
4195 n = 30;
4196
4197 /* store the hashes for later use */
3959 for (i = 0; i < n; i++) 4198 for (i = 0; i < n; i++)
3960 vf_data->vf_mc_hashes[i] = hash_list[i]; 4199 vf_data->vf_mc_hashes[i] = hash_list[i];
3961 4200
@@ -3972,9 +4211,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3972 int i, j; 4211 int i, j;
3973 4212
3974 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4213 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4214 u32 vmolr = rd32(E1000_VMOLR(i));
4215 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4216
3975 vf_data = &adapter->vf_data[i]; 4217 vf_data = &adapter->vf_data[i];
3976 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 4218
3977 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 4219 if ((vf_data->num_vf_mc_hashes > 30) ||
4220 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4221 vmolr |= E1000_VMOLR_MPME;
4222 } else if (vf_data->num_vf_mc_hashes) {
4223 vmolr |= E1000_VMOLR_ROMPE;
4224 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4225 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4226 }
4227 wr32(E1000_VMOLR(i), vmolr);
3978 } 4228 }
3979} 4229}
3980 4230
@@ -4012,7 +4262,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4012 struct e1000_hw *hw = &adapter->hw; 4262 struct e1000_hw *hw = &adapter->hw;
4013 u32 reg, i; 4263 u32 reg, i;
4014 4264
4015 /* It is an error to call this function when VFs are not enabled */ 4265 /* The vlvf table only exists on 82576 hardware and newer */
4266 if (hw->mac.type < e1000_82576)
4267 return -1;
4268
4269 /* we only need to do this if VMDq is enabled */
4016 if (!adapter->vfs_allocated_count) 4270 if (!adapter->vfs_allocated_count)
4017 return -1; 4271 return -1;
4018 4272
@@ -4042,16 +4296,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4042 4296
4043 /* if !enabled we need to set this up in vfta */ 4297 /* if !enabled we need to set this up in vfta */
4044 if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 4298 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4045 /* add VID to filter table, if bit already set 4299 /* add VID to filter table */
4046 * PF must have added it outside of table */ 4300 igb_vfta_set(hw, vid, true);
4047 if (igb_vfta_set(hw, vid, true))
4048 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4049 adapter->vfs_allocated_count);
4050 reg |= E1000_VLVF_VLANID_ENABLE; 4301 reg |= E1000_VLVF_VLANID_ENABLE;
4051 } 4302 }
4052 reg &= ~E1000_VLVF_VLANID_MASK; 4303 reg &= ~E1000_VLVF_VLANID_MASK;
4053 reg |= vid; 4304 reg |= vid;
4054
4055 wr32(E1000_VLVF(i), reg); 4305 wr32(E1000_VLVF(i), reg);
4056 4306
4057 /* do not modify RLPML for PF devices */ 4307 /* do not modify RLPML for PF devices */
@@ -4067,8 +4317,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4067 reg |= size; 4317 reg |= size;
4068 wr32(E1000_VMOLR(vf), reg); 4318 wr32(E1000_VMOLR(vf), reg);
4069 } 4319 }
4070 adapter->vf_data[vf].vlans_enabled++;
4071 4320
4321 adapter->vf_data[vf].vlans_enabled++;
4072 return 0; 4322 return 0;
4073 } 4323 }
4074 } else { 4324 } else {
@@ -4110,15 +4360,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4110 return igb_vlvf_set(adapter, vid, add, vf); 4360 return igb_vlvf_set(adapter, vid, add, vf);
4111} 4361}
4112 4362
4113static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 4363static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4114{ 4364{
4115 struct e1000_hw *hw = &adapter->hw; 4365 /* clear all flags */
4116 4366 adapter->vf_data[vf].flags = 0;
4117 /* disable mailbox functionality for vf */ 4367 adapter->vf_data[vf].last_nack = jiffies;
4118 adapter->vf_data[vf].clear_to_send = false;
4119 4368
4120 /* reset offloads to defaults */ 4369 /* reset offloads to defaults */
4121 igb_set_vmolr(hw, vf); 4370 igb_set_vmolr(adapter, vf);
4122 4371
4123 /* reset vlans for device */ 4372 /* reset vlans for device */
4124 igb_clear_vf_vfta(adapter, vf); 4373 igb_clear_vf_vfta(adapter, vf);
@@ -4130,7 +4379,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4130 igb_set_rx_mode(adapter->netdev); 4379 igb_set_rx_mode(adapter->netdev);
4131} 4380}
4132 4381
4133static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 4382static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4383{
4384 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4385
4386 /* generate a new mac address as we were hotplug removed/added */
4387 random_ether_addr(vf_mac);
4388
4389 /* process remaining reset events */
4390 igb_vf_reset(adapter, vf);
4391}
4392
4393static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4134{ 4394{
4135 struct e1000_hw *hw = &adapter->hw; 4395 struct e1000_hw *hw = &adapter->hw;
4136 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4396 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4139,11 +4399,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4139 u8 *addr = (u8 *)(&msgbuf[1]); 4399 u8 *addr = (u8 *)(&msgbuf[1]);
4140 4400
4141 /* process all the same items cleared in a function level reset */ 4401 /* process all the same items cleared in a function level reset */
4142 igb_vf_reset_event(adapter, vf); 4402 igb_vf_reset(adapter, vf);
4143 4403
4144 /* set vf mac address */ 4404 /* set vf mac address */
4145 igb_rar_set(hw, vf_mac, rar_entry); 4405 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4146 igb_set_rah_pool(hw, vf, rar_entry);
4147 4406
4148 /* enable transmit and receive for vf */ 4407 /* enable transmit and receive for vf */
4149 reg = rd32(E1000_VFTE); 4408 reg = rd32(E1000_VFTE);
@@ -4151,8 +4410,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4151 reg = rd32(E1000_VFRE); 4410 reg = rd32(E1000_VFRE);
4152 wr32(E1000_VFRE, reg | (1 << vf)); 4411 wr32(E1000_VFRE, reg | (1 << vf));
4153 4412
4154 /* enable mailbox functionality for vf */ 4413 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4155 adapter->vf_data[vf].clear_to_send = true;
4156 4414
4157 /* reply to reset with ack and vf mac address */ 4415 /* reply to reset with ack and vf mac address */
4158 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 4416 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4162,66 +4420,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4162 4420
4163static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4421static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4164{ 4422{
4165 unsigned char *addr = (char *)&msg[1]; 4423 unsigned char *addr = (char *)&msg[1];
4166 int err = -1; 4424 int err = -1;
4167
4168 if (is_valid_ether_addr(addr))
4169 err = igb_set_vf_mac(adapter, vf, addr);
4170 4425
4171 return err; 4426 if (is_valid_ether_addr(addr))
4427 err = igb_set_vf_mac(adapter, vf, addr);
4172 4428
4429 return err;
4173} 4430}
4174 4431
4175static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 4432static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4176{ 4433{
4177 struct e1000_hw *hw = &adapter->hw; 4434 struct e1000_hw *hw = &adapter->hw;
4435 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4178 u32 msg = E1000_VT_MSGTYPE_NACK; 4436 u32 msg = E1000_VT_MSGTYPE_NACK;
4179 4437
4180 /* if device isn't clear to send it shouldn't be reading either */ 4438 /* if device isn't clear to send it shouldn't be reading either */
4181 if (!adapter->vf_data[vf].clear_to_send) 4439 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4440 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4182 igb_write_mbx(hw, &msg, 1, vf); 4441 igb_write_mbx(hw, &msg, 1, vf);
4183} 4442 vf_data->last_nack = jiffies;
4184
4185
4186static void igb_msg_task(struct igb_adapter *adapter)
4187{
4188 struct e1000_hw *hw = &adapter->hw;
4189 u32 vf;
4190
4191 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4192 /* process any reset requests */
4193 if (!igb_check_for_rst(hw, vf)) {
4194 adapter->vf_data[vf].clear_to_send = false;
4195 igb_vf_reset_event(adapter, vf);
4196 }
4197
4198 /* process any messages pending */
4199 if (!igb_check_for_msg(hw, vf))
4200 igb_rcv_msg_from_vf(adapter, vf);
4201
4202 /* process any acks */
4203 if (!igb_check_for_ack(hw, vf))
4204 igb_rcv_ack_from_vf(adapter, vf);
4205
4206 } 4443 }
4207} 4444}
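
Aside, not part of the patch: the new code above throttles mailbox NACKs with a jiffies comparison, and igb_rcv_msg_from_vf() below uses the same guard. A minimal sketch of that pattern, assuming only <linux/jiffies.h> and the 2 second hold-off used here; the names below are illustrative, not from the driver:

	static unsigned long last_nack;		/* hypothetical per-VF timestamp */

	static bool nack_due(void)
	{
		/* time_after() compares via signed subtraction, so the test
		 * stays correct when the jiffies counter wraps around */
		if (!time_after(jiffies, last_nack + 2 * HZ))
			return false;
		last_nack = jiffies;		/* re-arm the 2 * HZ (two second) window */
		return true;
	}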
4208 4445
4209static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 4446static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4210{ 4447{
4211 u32 mbx_size = E1000_VFMAILBOX_SIZE; 4448 struct pci_dev *pdev = adapter->pdev;
4212 u32 msgbuf[mbx_size]; 4449 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4213 struct e1000_hw *hw = &adapter->hw; 4450 struct e1000_hw *hw = &adapter->hw;
4451 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4214 s32 retval; 4452 s32 retval;
4215 4453
4216 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 4454 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4217 4455
4218 if (retval) 4456 if (retval)
4219 dev_err(&adapter->pdev->dev, 4457 dev_err(&pdev->dev, "Error receiving message from VF\n");
4220 "Error receiving message from VF\n");
4221 4458
4222 /* this is a message we already processed, do nothing */ 4459 /* this is a message we already processed, do nothing */
4223 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 4460 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4224 return retval; 4461 return;
4225 4462
4226 /* 4463 /*
4227 * until the vf completes a reset it should not be 4464 * until the vf completes a reset it should not be
@@ -4230,20 +4467,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4230 4467
4231 if (msgbuf[0] == E1000_VF_RESET) { 4468 if (msgbuf[0] == E1000_VF_RESET) {
4232 igb_vf_reset_msg(adapter, vf); 4469 igb_vf_reset_msg(adapter, vf);
4233 4470 return;
4234 return retval;
4235 } 4471 }
4236 4472
4237 if (!adapter->vf_data[vf].clear_to_send) { 4473 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4238 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4474 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4239 igb_write_mbx(hw, msgbuf, 1, vf); 4475 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4240 return retval; 4476 igb_write_mbx(hw, msgbuf, 1, vf);
4477 vf_data->last_nack = jiffies;
4478 }
4479 return;
4241 } 4480 }
4242 4481
4243 switch ((msgbuf[0] & 0xFFFF)) { 4482 switch ((msgbuf[0] & 0xFFFF)) {
4244 case E1000_VF_SET_MAC_ADDR: 4483 case E1000_VF_SET_MAC_ADDR:
4245 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4484 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4246 break; 4485 break;
4486 case E1000_VF_SET_PROMISC:
4487 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4488 break;
4247 case E1000_VF_SET_MULTICAST: 4489 case E1000_VF_SET_MULTICAST:
4248 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4490 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4249 break; 4491 break;
@@ -4254,7 +4496,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4254 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4496 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4255 break; 4497 break;
4256 default: 4498 default:
4257 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4499 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4258 retval = -1; 4500 retval = -1;
4259 break; 4501 break;
4260 } 4502 }
@@ -4268,8 +4510,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4268 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4510 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4269 4511
4270 igb_write_mbx(hw, msgbuf, 1, vf); 4512 igb_write_mbx(hw, msgbuf, 1, vf);
4513}
4271 4514
4272 return retval; 4515static void igb_msg_task(struct igb_adapter *adapter)
4516{
4517 struct e1000_hw *hw = &adapter->hw;
4518 u32 vf;
4519
4520 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4521 /* process any reset requests */
4522 if (!igb_check_for_rst(hw, vf))
4523 igb_vf_reset_event(adapter, vf);
4524
4525 /* process any messages pending */
4526 if (!igb_check_for_msg(hw, vf))
4527 igb_rcv_msg_from_vf(adapter, vf);
4528
4529 /* process any acks */
4530 if (!igb_check_for_ack(hw, vf))
4531 igb_rcv_ack_from_vf(adapter, vf);
4532 }
4533}
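
Aside, not part of the patch: the handlers above imply a simple mailbox layout -- the request type sits in the low 16 bits of word 0, any payload starts at word 1, and the PF ORs status bits back into the same word for its reply. A sketch of a SET_MAC_ADDR exchange under that assumption (constants are the ones used above; mac_addr is illustrative):

	u32 msg[E1000_VFMAILBOX_SIZE] = { 0 };

	msg[0] = E1000_VF_SET_MAC_ADDR;		/* request type, low 16 bits of word 0 */
	memcpy(&msg[1], mac_addr, ETH_ALEN);	/* payload begins at word 1 */

	/* Once igb_rcv_msg_from_vf() has run, the reply reuses word 0:
	 * E1000_VT_MSGTYPE_ACK or _NACK reports the result, and
	 * E1000_VT_MSGTYPE_CTS is set once the VF has completed a reset. */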
4534
4535/**
4536 * igb_set_uta - Set unicast filter table address
4537 * @adapter: board private structure
4538 *
4539 * The unicast table address is a register array of 32-bit registers.
4540 * The table is meant to be used in a way similar to how the MTA is used;
4541 * however, due to certain limitations in the hardware it is necessary to
4542 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
4543 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
4544 **/
4545static void igb_set_uta(struct igb_adapter *adapter)
4546{
4547 struct e1000_hw *hw = &adapter->hw;
4548 int i;
4549
4550 /* The UTA table only exists on 82576 hardware and newer */
4551 if (hw->mac.type < e1000_82576)
4552 return;
4553
4554 /* we only need to do this if VMDq is enabled */
4555 if (!adapter->vfs_allocated_count)
4556 return;
4557
4558 for (i = 0; i < hw->mac.uta_reg_count; i++)
4559 array_wr32(E1000_UTA, i, ~0);
4273} 4560}
4274 4561
4275/** 4562/**
@@ -4279,15 +4566,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4279 **/ 4566 **/
4280static irqreturn_t igb_intr_msi(int irq, void *data) 4567static irqreturn_t igb_intr_msi(int irq, void *data)
4281{ 4568{
4282 struct net_device *netdev = data; 4569 struct igb_adapter *adapter = data;
4283 struct igb_adapter *adapter = netdev_priv(netdev); 4570 struct igb_q_vector *q_vector = adapter->q_vector[0];
4284 struct e1000_hw *hw = &adapter->hw; 4571 struct e1000_hw *hw = &adapter->hw;
4285 /* read ICR disables interrupts using IAM */ 4572 /* read ICR disables interrupts using IAM */
4286 u32 icr = rd32(E1000_ICR); 4573 u32 icr = rd32(E1000_ICR);
4287 4574
4288 igb_write_itr(adapter->rx_ring); 4575 igb_write_itr(q_vector);
4289 4576
4290 if(icr & E1000_ICR_DOUTSYNC) { 4577 if (icr & E1000_ICR_DOUTSYNC) {
4291 /* HW is reporting DMA is out of sync */ 4578 /* HW is reporting DMA is out of sync */
4292 adapter->stats.doosync++; 4579 adapter->stats.doosync++;
4293 } 4580 }
@@ -4298,7 +4585,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4298 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4585 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4299 } 4586 }
4300 4587
4301 napi_schedule(&adapter->rx_ring[0].napi); 4588 napi_schedule(&q_vector->napi);
4302 4589
4303 return IRQ_HANDLED; 4590 return IRQ_HANDLED;
4304} 4591}
@@ -4310,8 +4597,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4310 **/ 4597 **/
4311static irqreturn_t igb_intr(int irq, void *data) 4598static irqreturn_t igb_intr(int irq, void *data)
4312{ 4599{
4313 struct net_device *netdev = data; 4600 struct igb_adapter *adapter = data;
4314 struct igb_adapter *adapter = netdev_priv(netdev); 4601 struct igb_q_vector *q_vector = adapter->q_vector[0];
4315 struct e1000_hw *hw = &adapter->hw; 4602 struct e1000_hw *hw = &adapter->hw;
4316 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4603 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4317 * need for the IMC write */ 4604 * need for the IMC write */
@@ -4319,14 +4606,14 @@ static irqreturn_t igb_intr(int irq, void *data)
4319 if (!icr) 4606 if (!icr)
4320 return IRQ_NONE; /* Not our interrupt */ 4607 return IRQ_NONE; /* Not our interrupt */
4321 4608
4322 igb_write_itr(adapter->rx_ring); 4609 igb_write_itr(q_vector);
4323 4610
4324 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4611 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4325 * not set, then the adapter didn't send an interrupt */ 4612 * not set, then the adapter didn't send an interrupt */
4326 if (!(icr & E1000_ICR_INT_ASSERTED)) 4613 if (!(icr & E1000_ICR_INT_ASSERTED))
4327 return IRQ_NONE; 4614 return IRQ_NONE;
4328 4615
4329 if(icr & E1000_ICR_DOUTSYNC) { 4616 if (icr & E1000_ICR_DOUTSYNC) {
4330 /* HW is reporting DMA is out of sync */ 4617 /* HW is reporting DMA is out of sync */
4331 adapter->stats.doosync++; 4618 adapter->stats.doosync++;
4332 } 4619 }
@@ -4338,26 +4625,27 @@ static irqreturn_t igb_intr(int irq, void *data)
4338 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4625 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4339 } 4626 }
4340 4627
4341 napi_schedule(&adapter->rx_ring[0].napi); 4628 napi_schedule(&q_vector->napi);
4342 4629
4343 return IRQ_HANDLED; 4630 return IRQ_HANDLED;
4344} 4631}
4345 4632
4346static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) 4633static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4347{ 4634{
4348 struct igb_adapter *adapter = rx_ring->adapter; 4635 struct igb_adapter *adapter = q_vector->adapter;
4349 struct e1000_hw *hw = &adapter->hw; 4636 struct e1000_hw *hw = &adapter->hw;
4350 4637
4351 if (adapter->itr_setting & 3) { 4638 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4352 if (adapter->num_rx_queues == 1) 4639 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4640 if (!adapter->msix_entries)
4353 igb_set_itr(adapter); 4641 igb_set_itr(adapter);
4354 else 4642 else
4355 igb_update_ring_itr(rx_ring); 4643 igb_update_ring_itr(q_vector);
4356 } 4644 }
4357 4645
4358 if (!test_bit(__IGB_DOWN, &adapter->state)) { 4646 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4359 if (adapter->msix_entries) 4647 if (adapter->msix_entries)
4360 wr32(E1000_EIMS, rx_ring->eims_value); 4648 wr32(E1000_EIMS, q_vector->eims_value);
4361 else 4649 else
4362 igb_irq_enable(adapter); 4650 igb_irq_enable(adapter);
4363 } 4651 }
@@ -4370,76 +4658,94 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4370 **/ 4658 **/
4371static int igb_poll(struct napi_struct *napi, int budget) 4659static int igb_poll(struct napi_struct *napi, int budget)
4372{ 4660{
4373 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4661 struct igb_q_vector *q_vector = container_of(napi,
4374 int work_done = 0; 4662 struct igb_q_vector,
4663 napi);
4664 int tx_clean_complete = 1, work_done = 0;
4375 4665
4376#ifdef CONFIG_IGB_DCA 4666#ifdef CONFIG_IGB_DCA
4377 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4667 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4378 igb_update_rx_dca(rx_ring); 4668 igb_update_dca(q_vector);
4379#endif 4669#endif
4380 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 4670 if (q_vector->tx_ring)
4671 tx_clean_complete = igb_clean_tx_irq(q_vector);
4381 4672
4382 if (rx_ring->buddy) { 4673 if (q_vector->rx_ring)
4383#ifdef CONFIG_IGB_DCA 4674 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4384 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4675
4385 igb_update_tx_dca(rx_ring->buddy); 4676 if (!tx_clean_complete)
4386#endif 4677 work_done = budget;
4387 if (!igb_clean_tx_irq(rx_ring->buddy))
4388 work_done = budget;
4389 }
4390 4678
4391 /* If not enough Rx work done, exit the polling mode */ 4679 /* If not enough Rx work done, exit the polling mode */
4392 if (work_done < budget) { 4680 if (work_done < budget) {
4393 napi_complete(napi); 4681 napi_complete(napi);
4394 igb_rx_irq_enable(rx_ring); 4682 igb_ring_irq_enable(q_vector);
4395 } 4683 }
4396 4684
4397 return work_done; 4685 return work_done;
4398} 4686}
4399 4687
4400/** 4688/**
4401 * igb_hwtstamp - utility function which checks for TX time stamp 4689 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4402 * @adapter: board private structure 4690 * @adapter: board private structure
4691 * @shhwtstamps: timestamp structure to update
4692 * @regval: unsigned 64bit system time value.
4693 *
4694 * We need to convert the system time value stored in the RX/TXSTMP registers
4695 * into a hwtstamp which can be used by the upper level timestamping functions
4696 */
4697static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4698 struct skb_shared_hwtstamps *shhwtstamps,
4699 u64 regval)
4700{
4701 u64 ns;
4702
4703 ns = timecounter_cyc2time(&adapter->clock, regval);
4704 timecompare_update(&adapter->compare, ns);
4705 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4706 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4707 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4708}
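
Aside, not part of the patch: timecounter_cyc2time() above presumes adapter->clock was initialized as a timecounter over the free-running SYSTIM counter. A minimal sketch of that setup, assuming it lives in the probe path (it is not part of this hunk); the read callback and the mult/shift scaling are placeholders, not the driver's real values:

	static cycle_t igb_read_systim(const struct cyclecounter *cc)
	{
		return 0;	/* placeholder: the real callback reads SYSTIML/SYSTIMH into a u64 */
	}

	static struct cyclecounter cc = {
		.read  = igb_read_systim,
		.mask  = CLOCKSOURCE_MASK(64),
		.mult  = 1,	/* actual scaling depends on the 82576 SYSTIM clock rate */
		.shift = 0,
	};

	/* from the probe path: */
	timecounter_init(&adapter->clock, &cc, ktime_to_ns(ktime_get_real()));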
4709
4710/**
4711 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4712 * @q_vector: pointer to q_vector containing needed info
4403 * @skb: packet that was just sent 4713 * @skb: packet that was just sent
4404 * 4714 *
4405 * If we were asked to do hardware stamping and such a time stamp is 4715 * If we were asked to do hardware stamping and such a time stamp is
4406 * available, then it must have been for this skb here because we 4716 * available, then it must have been for this skb here because we
4407 * allow only one such packet into the queue. 4717 * allow only one such packet into the queue.
4408 */ 4718 */
4409static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) 4719static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4410{ 4720{
4721 struct igb_adapter *adapter = q_vector->adapter;
4411 union skb_shared_tx *shtx = skb_tx(skb); 4722 union skb_shared_tx *shtx = skb_tx(skb);
4412 struct e1000_hw *hw = &adapter->hw; 4723 struct e1000_hw *hw = &adapter->hw;
4724 struct skb_shared_hwtstamps shhwtstamps;
4725 u64 regval;
4413 4726
4414 if (unlikely(shtx->hardware)) { 4727 /* if skb does not support hw timestamp or TX stamp not valid exit */
4415 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; 4728 if (likely(!shtx->hardware) ||
4416 if (valid) { 4729 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4417 u64 regval = rd32(E1000_TXSTMPL); 4730 return;
4418 u64 ns; 4731
4419 struct skb_shared_hwtstamps shhwtstamps; 4732 regval = rd32(E1000_TXSTMPL);
4420 4733 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4421 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 4734
4422 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 4735 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4423 ns = timecounter_cyc2time(&adapter->clock, 4736 skb_tstamp_tx(skb, &shhwtstamps);
4424 regval);
4425 timecompare_update(&adapter->compare, ns);
4426 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4427 shhwtstamps.syststamp =
4428 timecompare_transform(&adapter->compare, ns);
4429 skb_tstamp_tx(skb, &shhwtstamps);
4430 }
4431 }
4432} 4737}
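
Aside, not part of the patch: skb_tstamp_tx() hands the hardware stamp back to the sending socket's error queue. A hedged sketch of the userspace side that requests and reads such stamps (standard uapi names from linux/net_tstamp.h; cmsg parsing omitted):

	struct msghdr msg = { 0 };	/* real code points msg_control at a cmsg buffer */
	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
	/* ...send the packet to be stamped... */
	recvmsg(fd, &msg, MSG_ERRQUEUE);	/* the stamp set by skb_tstamp_tx() comes
						 * back as an SCM_TIMESTAMPING cmsg */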
4433 4738
4434/** 4739/**
4435 * igb_clean_tx_irq - Reclaim resources after transmit completes 4740 * igb_clean_tx_irq - Reclaim resources after transmit completes
4436 * @adapter: board private structure 4741 * @q_vector: pointer to q_vector containing needed info
4437 * returns true if ring is completely cleaned 4742 * returns true if ring is completely cleaned
4438 **/ 4743 **/
4439static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 4744static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4440{ 4745{
4441 struct igb_adapter *adapter = tx_ring->adapter; 4746 struct igb_adapter *adapter = q_vector->adapter;
4442 struct net_device *netdev = adapter->netdev; 4747 struct igb_ring *tx_ring = q_vector->tx_ring;
4748 struct net_device *netdev = tx_ring->netdev;
4443 struct e1000_hw *hw = &adapter->hw; 4749 struct e1000_hw *hw = &adapter->hw;
4444 struct igb_buffer *buffer_info; 4750 struct igb_buffer *buffer_info;
4445 struct sk_buff *skb; 4751 struct sk_buff *skb;
@@ -4470,10 +4776,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4470 total_packets += segs; 4776 total_packets += segs;
4471 total_bytes += bytecount; 4777 total_bytes += bytecount;
4472 4778
4473 igb_tx_hwtstamp(adapter, skb); 4779 igb_tx_hwtstamp(q_vector, skb);
4474 } 4780 }
4475 4781
4476 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4782 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4477 tx_desc->wb.status = 0; 4783 tx_desc->wb.status = 0;
4478 4784
4479 i++; 4785 i++;
@@ -4496,7 +4802,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4496 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 4802 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4497 !(test_bit(__IGB_DOWN, &adapter->state))) { 4803 !(test_bit(__IGB_DOWN, &adapter->state))) {
4498 netif_wake_subqueue(netdev, tx_ring->queue_index); 4804 netif_wake_subqueue(netdev, tx_ring->queue_index);
4499 ++adapter->restart_queue; 4805 tx_ring->tx_stats.restart_queue++;
4500 } 4806 }
4501 } 4807 }
4502 4808
@@ -4511,7 +4817,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4511 E1000_STATUS_TXOFF)) { 4817 E1000_STATUS_TXOFF)) {
4512 4818
4513 /* detected Tx unit hang */ 4819 /* detected Tx unit hang */
4514 dev_err(&adapter->pdev->dev, 4820 dev_err(&tx_ring->pdev->dev,
4515 "Detected Tx Unit Hang\n" 4821 "Detected Tx Unit Hang\n"
4516 " Tx Queue <%d>\n" 4822 " Tx Queue <%d>\n"
4517 " TDH <%x>\n" 4823 " TDH <%x>\n"
@@ -4524,11 +4830,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4524 " jiffies <%lx>\n" 4830 " jiffies <%lx>\n"
4525 " desc.status <%x>\n", 4831 " desc.status <%x>\n",
4526 tx_ring->queue_index, 4832 tx_ring->queue_index,
4527 readl(adapter->hw.hw_addr + tx_ring->head), 4833 readl(tx_ring->head),
4528 readl(adapter->hw.hw_addr + tx_ring->tail), 4834 readl(tx_ring->tail),
4529 tx_ring->next_to_use, 4835 tx_ring->next_to_use,
4530 tx_ring->next_to_clean, 4836 tx_ring->next_to_clean,
4531 tx_ring->buffer_info[i].time_stamp, 4837 tx_ring->buffer_info[eop].time_stamp,
4532 eop, 4838 eop,
4533 jiffies, 4839 jiffies,
4534 eop_desc->wb.status); 4840 eop_desc->wb.status);
@@ -4539,43 +4845,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4539 tx_ring->total_packets += total_packets; 4845 tx_ring->total_packets += total_packets;
4540 tx_ring->tx_stats.bytes += total_bytes; 4846 tx_ring->tx_stats.bytes += total_bytes;
4541 tx_ring->tx_stats.packets += total_packets; 4847 tx_ring->tx_stats.packets += total_packets;
4542 adapter->net_stats.tx_bytes += total_bytes;
4543 adapter->net_stats.tx_packets += total_packets;
4544 return (count < tx_ring->count); 4848 return (count < tx_ring->count);
4545} 4849}
4546 4850
4547/** 4851/**
4548 * igb_receive_skb - helper function to handle rx indications 4852 * igb_receive_skb - helper function to handle rx indications
4549 * @ring: pointer to receive ring receving this packet 4853 * @q_vector: structure containing interrupt and ring information
4550 * @status: descriptor status field as written by hardware 4854 * @skb: packet to send up
4551 * @rx_desc: receive descriptor containing vlan and type information. 4855 * @vlan_tag: vlan tag for packet
4552 * @skb: pointer to sk_buff to be indicated to stack
4553 **/ 4856 **/
4554static void igb_receive_skb(struct igb_ring *ring, u8 status, 4857static void igb_receive_skb(struct igb_q_vector *q_vector,
4555 union e1000_adv_rx_desc * rx_desc, 4858 struct sk_buff *skb,
4556 struct sk_buff *skb) 4859 u16 vlan_tag)
4557{ 4860{
4558 struct igb_adapter * adapter = ring->adapter; 4861 struct igb_adapter *adapter = q_vector->adapter;
4559 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4862
4560 4863 if (vlan_tag)
4561 skb_record_rx_queue(skb, ring->queue_index); 4864 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4562 if (vlan_extracted) 4865 vlan_tag, skb);
4563 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4564 le16_to_cpu(rx_desc->wb.upper.vlan),
4565 skb);
4566 else 4866 else
4567 napi_gro_receive(&ring->napi, skb); 4867 napi_gro_receive(&q_vector->napi, skb);
4568} 4868}
4569 4869
4570static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4870static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4571 u32 status_err, struct sk_buff *skb) 4871 u32 status_err, struct sk_buff *skb)
4572{ 4872{
4573 skb->ip_summed = CHECKSUM_NONE; 4873 skb->ip_summed = CHECKSUM_NONE;
4574 4874
4575 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 4875 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4576 if ((status_err & E1000_RXD_STAT_IXSM) || 4876 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4577 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) 4877 (status_err & E1000_RXD_STAT_IXSM))
4578 return; 4878 return;
4879
4579 /* TCP/UDP checksum error bit is set */ 4880 /* TCP/UDP checksum error bit is set */
4580 if (status_err & 4881 if (status_err &
4581 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 4882 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4584,9 +4885,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4584 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 4885 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4585 * packets, (aka let the stack check the crc32c) 4886 * packets, (aka let the stack check the crc32c)
4586 */ 4887 */
4587 if (!((adapter->hw.mac.type == e1000_82576) && 4888 if ((skb->len == 60) &&
4588 (skb->len == 60))) 4889 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
4589 adapter->hw_csum_err++; 4890 ring->rx_stats.csum_err++;
4891
4590 /* let the stack verify checksum errors */ 4892 /* let the stack verify checksum errors */
4591 return; 4893 return;
4592 } 4894 }
@@ -4594,11 +4896,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4594 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 4896 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4595 skb->ip_summed = CHECKSUM_UNNECESSARY; 4897 skb->ip_summed = CHECKSUM_UNNECESSARY;
4596 4898
4597 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); 4899 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4598 adapter->hw_csum_good++;
4599} 4900}
4600 4901
4601static inline u16 igb_get_hlen(struct igb_adapter *adapter, 4902static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
4903 struct sk_buff *skb)
4904{
4905 struct igb_adapter *adapter = q_vector->adapter;
4906 struct e1000_hw *hw = &adapter->hw;
4907 u64 regval;
4908
4909 /*
4910 * If this bit is set, then the RX registers contain the time stamp. No
4911 * other packet will be time stamped until we read these registers, so
4912 * read the registers to make them available again. Because only one
4913 * packet can be time stamped at a time, we know that the register
4914 * values must belong to this one here and therefore we don't need to
4915 * compare any of the additional attributes stored for it.
4916 *
4917 * If nothing went wrong, then it should have a skb_shared_tx that we
4918 * can turn into a skb_shared_hwtstamps.
4919 */
4920 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
4921 return;
4922 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
4923 return;
4924
4925 regval = rd32(E1000_RXSTMPL);
4926 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4927
4928 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
4929}
4930static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4602 union e1000_adv_rx_desc *rx_desc) 4931 union e1000_adv_rx_desc *rx_desc)
4603{ 4932{
4604 /* HW will not DMA in data larger than the given buffer, even if it 4933 /* HW will not DMA in data larger than the given buffer, even if it
@@ -4607,27 +4936,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
4607 */ 4936 */
4608 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 4937 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4609 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 4938 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4610 if (hlen > adapter->rx_ps_hdr_size) 4939 if (hlen > rx_ring->rx_buffer_len)
4611 hlen = adapter->rx_ps_hdr_size; 4940 hlen = rx_ring->rx_buffer_len;
4612 return hlen; 4941 return hlen;
4613} 4942}
4614 4943
4615static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, 4944static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4616 int *work_done, int budget) 4945 int *work_done, int budget)
4617{ 4946{
4618 struct igb_adapter *adapter = rx_ring->adapter; 4947 struct igb_ring *rx_ring = q_vector->rx_ring;
4619 struct net_device *netdev = adapter->netdev; 4948 struct net_device *netdev = rx_ring->netdev;
4620 struct e1000_hw *hw = &adapter->hw; 4949 struct pci_dev *pdev = rx_ring->pdev;
4621 struct pci_dev *pdev = adapter->pdev;
4622 union e1000_adv_rx_desc *rx_desc , *next_rxd; 4950 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4623 struct igb_buffer *buffer_info , *next_buffer; 4951 struct igb_buffer *buffer_info , *next_buffer;
4624 struct sk_buff *skb; 4952 struct sk_buff *skb;
4625 bool cleaned = false; 4953 bool cleaned = false;
4626 int cleaned_count = 0; 4954 int cleaned_count = 0;
4955 int current_node = numa_node_id();
4627 unsigned int total_bytes = 0, total_packets = 0; 4956 unsigned int total_bytes = 0, total_packets = 0;
4628 unsigned int i; 4957 unsigned int i;
4629 u32 staterr; 4958 u32 staterr;
4630 u16 length; 4959 u16 length;
4960 u16 vlan_tag;
4631 4961
4632 i = rx_ring->next_to_clean; 4962 i = rx_ring->next_to_clean;
4633 buffer_info = &rx_ring->buffer_info[i]; 4963 buffer_info = &rx_ring->buffer_info[i];
@@ -4646,6 +4976,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4646 i++; 4976 i++;
4647 if (i == rx_ring->count) 4977 if (i == rx_ring->count)
4648 i = 0; 4978 i = 0;
4979
4649 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 4980 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4650 prefetch(next_rxd); 4981 prefetch(next_rxd);
4651 next_buffer = &rx_ring->buffer_info[i]; 4982 next_buffer = &rx_ring->buffer_info[i];
@@ -4654,23 +4985,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4654 cleaned = true; 4985 cleaned = true;
4655 cleaned_count++; 4986 cleaned_count++;
4656 4987
4657 /* this is the fast path for the non-packet split case */
4658 if (!adapter->rx_ps_hdr_size) {
4659 pci_unmap_single(pdev, buffer_info->dma,
4660 adapter->rx_buffer_len,
4661 PCI_DMA_FROMDEVICE);
4662 buffer_info->dma = 0;
4663 skb_put(skb, length);
4664 goto send_up;
4665 }
4666
4667 if (buffer_info->dma) { 4988 if (buffer_info->dma) {
4668 u16 hlen = igb_get_hlen(adapter, rx_desc);
4669 pci_unmap_single(pdev, buffer_info->dma, 4989 pci_unmap_single(pdev, buffer_info->dma,
4670 adapter->rx_ps_hdr_size, 4990 rx_ring->rx_buffer_len,
4671 PCI_DMA_FROMDEVICE); 4991 PCI_DMA_FROMDEVICE);
4672 buffer_info->dma = 0; 4992 buffer_info->dma = 0;
4673 skb_put(skb, hlen); 4993 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4994 skb_put(skb, length);
4995 goto send_up;
4996 }
4997 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4674 } 4998 }
4675 4999
4676 if (length) { 5000 if (length) {
@@ -4683,15 +5007,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4683 buffer_info->page_offset, 5007 buffer_info->page_offset,
4684 length); 5008 length);
4685 5009
4686 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 5010 if ((page_count(buffer_info->page) != 1) ||
4687 (page_count(buffer_info->page) != 1)) 5011 (page_to_nid(buffer_info->page) != current_node))
4688 buffer_info->page = NULL; 5012 buffer_info->page = NULL;
4689 else 5013 else
4690 get_page(buffer_info->page); 5014 get_page(buffer_info->page);
4691 5015
4692 skb->len += length; 5016 skb->len += length;
4693 skb->data_len += length; 5017 skb->data_len += length;
4694
4695 skb->truesize += length; 5018 skb->truesize += length;
4696 } 5019 }
4697 5020
@@ -4703,60 +5026,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4703 goto next_desc; 5026 goto next_desc;
4704 } 5027 }
4705send_up: 5028send_up:
4706 /*
4707 * If this bit is set, then the RX registers contain
4708 * the time stamp. No other packet will be time
4709 * stamped until we read these registers, so read the
4710 * registers to make them available again. Because
4711 * only one packet can be time stamped at a time, we
4712 * know that the register values must belong to this
4713 * one here and therefore we don't need to compare
4714 * any of the additional attributes stored for it.
4715 *
4716 * If nothing went wrong, then it should have a
4717 * skb_shared_tx that we can turn into a
4718 * skb_shared_hwtstamps.
4719 *
4720 * TODO: can time stamping be triggered (thus locking
4721 * the registers) without the packet reaching this point
4722 * here? In that case RX time stamping would get stuck.
4723 *
4724 * TODO: in "time stamp all packets" mode this bit is
4725 * not set. Need a global flag for this mode and then
4726 * always read the registers. Cannot be done without
4727 * a race condition.
4728 */
4729 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4730 u64 regval;
4731 u64 ns;
4732 struct skb_shared_hwtstamps *shhwtstamps =
4733 skb_hwtstamps(skb);
4734
4735 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4736 "igb: no RX time stamp available for time stamped packet");
4737 regval = rd32(E1000_RXSTMPL);
4738 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4739 ns = timecounter_cyc2time(&adapter->clock, regval);
4740 timecompare_update(&adapter->compare, ns);
4741 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4742 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4743 shhwtstamps->syststamp =
4744 timecompare_transform(&adapter->compare, ns);
4745 }
4746
4747 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 5029 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4748 dev_kfree_skb_irq(skb); 5030 dev_kfree_skb_irq(skb);
4749 goto next_desc; 5031 goto next_desc;
4750 } 5032 }
4751 5033
5034 igb_rx_hwtstamp(q_vector, staterr, skb);
4752 total_bytes += skb->len; 5035 total_bytes += skb->len;
4753 total_packets++; 5036 total_packets++;
4754 5037
4755 igb_rx_checksum_adv(adapter, staterr, skb); 5038 igb_rx_checksum_adv(rx_ring, staterr, skb);
4756 5039
4757 skb->protocol = eth_type_trans(skb, netdev); 5040 skb->protocol = eth_type_trans(skb, netdev);
5041 skb_record_rx_queue(skb, rx_ring->queue_index);
4758 5042
4759 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 5043 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5044 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5045
5046 igb_receive_skb(q_vector, skb, vlan_tag);
4760 5047
4761next_desc: 5048next_desc:
4762 rx_desc->wb.upper.status_error = 0; 5049 rx_desc->wb.upper.status_error = 0;
@@ -4783,8 +5070,6 @@ next_desc:
4783 rx_ring->total_bytes += total_bytes; 5070 rx_ring->total_bytes += total_bytes;
4784 rx_ring->rx_stats.packets += total_packets; 5071 rx_ring->rx_stats.packets += total_packets;
4785 rx_ring->rx_stats.bytes += total_bytes; 5072 rx_ring->rx_stats.bytes += total_bytes;
4786 adapter->net_stats.rx_bytes += total_bytes;
4787 adapter->net_stats.rx_packets += total_packets;
4788 return cleaned; 5073 return cleaned;
4789} 5074}
4790 5075
@@ -4792,12 +5077,9 @@ next_desc:
4792 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 5077 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4793 * @adapter: address of board private structure 5078 * @adapter: address of board private structure
4794 **/ 5079 **/
4795static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, 5080void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
4796 int cleaned_count)
4797{ 5081{
4798 struct igb_adapter *adapter = rx_ring->adapter; 5082 struct net_device *netdev = rx_ring->netdev;
4799 struct net_device *netdev = adapter->netdev;
4800 struct pci_dev *pdev = adapter->pdev;
4801 union e1000_adv_rx_desc *rx_desc; 5083 union e1000_adv_rx_desc *rx_desc;
4802 struct igb_buffer *buffer_info; 5084 struct igb_buffer *buffer_info;
4803 struct sk_buff *skb; 5085 struct sk_buff *skb;
@@ -4807,19 +5089,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4807 i = rx_ring->next_to_use; 5089 i = rx_ring->next_to_use;
4808 buffer_info = &rx_ring->buffer_info[i]; 5090 buffer_info = &rx_ring->buffer_info[i];
4809 5091
4810 if (adapter->rx_ps_hdr_size) 5092 bufsz = rx_ring->rx_buffer_len;
4811 bufsz = adapter->rx_ps_hdr_size;
4812 else
4813 bufsz = adapter->rx_buffer_len;
4814 5093
4815 while (cleaned_count--) { 5094 while (cleaned_count--) {
4816 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 5095 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4817 5096
4818 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 5097 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
4819 if (!buffer_info->page) { 5098 if (!buffer_info->page) {
4820 buffer_info->page = alloc_page(GFP_ATOMIC); 5099 buffer_info->page = netdev_alloc_page(netdev);
4821 if (!buffer_info->page) { 5100 if (!buffer_info->page) {
4822 adapter->alloc_rx_buff_failed++; 5101 rx_ring->rx_stats.alloc_failed++;
4823 goto no_buffers; 5102 goto no_buffers;
4824 } 5103 }
4825 buffer_info->page_offset = 0; 5104 buffer_info->page_offset = 0;
@@ -4827,39 +5106,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4827 buffer_info->page_offset ^= PAGE_SIZE / 2; 5106 buffer_info->page_offset ^= PAGE_SIZE / 2;
4828 } 5107 }
4829 buffer_info->page_dma = 5108 buffer_info->page_dma =
4830 pci_map_page(pdev, buffer_info->page, 5109 pci_map_page(rx_ring->pdev, buffer_info->page,
4831 buffer_info->page_offset, 5110 buffer_info->page_offset,
4832 PAGE_SIZE / 2, 5111 PAGE_SIZE / 2,
4833 PCI_DMA_FROMDEVICE); 5112 PCI_DMA_FROMDEVICE);
5113 if (pci_dma_mapping_error(rx_ring->pdev,
5114 buffer_info->page_dma)) {
5115 buffer_info->page_dma = 0;
5116 rx_ring->rx_stats.alloc_failed++;
5117 goto no_buffers;
5118 }
4834 } 5119 }
4835 5120
4836 if (!buffer_info->skb) { 5121 skb = buffer_info->skb;
4837 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 5122 if (!skb) {
5123 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4838 if (!skb) { 5124 if (!skb) {
4839 adapter->alloc_rx_buff_failed++; 5125 rx_ring->rx_stats.alloc_failed++;
4840 goto no_buffers; 5126 goto no_buffers;
4841 } 5127 }
4842 5128
4843 /* Make buffer alignment 2 beyond a 16 byte boundary
4844 * this will result in a 16 byte aligned IP header after
4845 * the 14 byte MAC header is removed
4846 */
4847 skb_reserve(skb, NET_IP_ALIGN);
4848
4849 buffer_info->skb = skb; 5129 buffer_info->skb = skb;
4850 buffer_info->dma = pci_map_single(pdev, skb->data, 5130 }
5131 if (!buffer_info->dma) {
5132 buffer_info->dma = pci_map_single(rx_ring->pdev,
5133 skb->data,
4851 bufsz, 5134 bufsz,
4852 PCI_DMA_FROMDEVICE); 5135 PCI_DMA_FROMDEVICE);
5136 if (pci_dma_mapping_error(rx_ring->pdev,
5137 buffer_info->dma)) {
5138 buffer_info->dma = 0;
5139 rx_ring->rx_stats.alloc_failed++;
5140 goto no_buffers;
5141 }
4853 } 5142 }
4854 /* Refresh the desc even if buffer_addrs didn't change because 5143 /* Refresh the desc even if buffer_addrs didn't change because
4855 * each write-back erases this info. */ 5144 * each write-back erases this info. */
4856 if (adapter->rx_ps_hdr_size) { 5145 if (bufsz < IGB_RXBUFFER_1024) {
4857 rx_desc->read.pkt_addr = 5146 rx_desc->read.pkt_addr =
4858 cpu_to_le64(buffer_info->page_dma); 5147 cpu_to_le64(buffer_info->page_dma);
4859 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5148 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4860 } else { 5149 } else {
4861 rx_desc->read.pkt_addr = 5150 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
4862 cpu_to_le64(buffer_info->dma);
4863 rx_desc->read.hdr_addr = 0; 5151 rx_desc->read.hdr_addr = 0;
4864 } 5152 }
4865 5153
@@ -4882,7 +5170,7 @@ no_buffers:
4882 * applicable for weak-ordered memory model archs, 5170 * applicable for weak-ordered memory model archs,
4883 * such as IA-64). */ 5171 * such as IA-64). */
4884 wmb(); 5172 wmb();
4885 writel(i, adapter->hw.hw_addr + rx_ring->tail); 5173 writel(i, rx_ring->tail);
4886 } 5174 }
4887} 5175}
4888 5176
@@ -4941,13 +5229,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4941 struct igb_adapter *adapter = netdev_priv(netdev); 5229 struct igb_adapter *adapter = netdev_priv(netdev);
4942 struct e1000_hw *hw = &adapter->hw; 5230 struct e1000_hw *hw = &adapter->hw;
4943 struct hwtstamp_config config; 5231 struct hwtstamp_config config;
4944 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; 5232 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
4945 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; 5233 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
4946 u32 tsync_rx_ctl_type = 0;
4947 u32 tsync_rx_cfg = 0; 5234 u32 tsync_rx_cfg = 0;
4948 int is_l4 = 0; 5235 bool is_l4 = false;
4949 int is_l2 = 0; 5236 bool is_l2 = false;
4950 short port = 319; /* PTP */
4951 u32 regval; 5237 u32 regval;
4952 5238
4953 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5239 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -4959,10 +5245,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4959 5245
4960 switch (config.tx_type) { 5246 switch (config.tx_type) {
4961 case HWTSTAMP_TX_OFF: 5247 case HWTSTAMP_TX_OFF:
4962 tsync_tx_ctl_bit = 0; 5248 tsync_tx_ctl = 0;
4963 break;
4964 case HWTSTAMP_TX_ON: 5249 case HWTSTAMP_TX_ON:
4965 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4966 break; 5250 break;
4967 default: 5251 default:
4968 return -ERANGE; 5252 return -ERANGE;
@@ -4970,7 +5254,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4970 5254
4971 switch (config.rx_filter) { 5255 switch (config.rx_filter) {
4972 case HWTSTAMP_FILTER_NONE: 5256 case HWTSTAMP_FILTER_NONE:
4973 tsync_rx_ctl_bit = 0; 5257 tsync_rx_ctl = 0;
4974 break; 5258 break;
4975 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 5259 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4976 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 5260 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -4981,86 +5265,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4981 * possible to time stamp both Sync and Delay_Req messages 5265 * possible to time stamp both Sync and Delay_Req messages
4982 * => fall back to time stamping all packets 5266 * => fall back to time stamping all packets
4983 */ 5267 */
4984 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; 5268 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
4985 config.rx_filter = HWTSTAMP_FILTER_ALL; 5269 config.rx_filter = HWTSTAMP_FILTER_ALL;
4986 break; 5270 break;
4987 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 5271 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4988 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5272 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4989 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 5273 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4990 is_l4 = 1; 5274 is_l4 = true;
4991 break; 5275 break;
4992 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 5276 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4993 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5277 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4994 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 5278 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4995 is_l4 = 1; 5279 is_l4 = true;
4996 break; 5280 break;
4997 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5281 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4998 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5282 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4999 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5283 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5000 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; 5284 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5001 is_l2 = 1; 5285 is_l2 = true;
5002 is_l4 = 1; 5286 is_l4 = true;
5003 config.rx_filter = HWTSTAMP_FILTER_SOME; 5287 config.rx_filter = HWTSTAMP_FILTER_SOME;
5004 break; 5288 break;
5005 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 5289 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5006 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 5290 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5007 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5291 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5008 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; 5292 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5009 is_l2 = 1; 5293 is_l2 = true;
5010 is_l4 = 1; 5294 is_l4 = true;
5011 config.rx_filter = HWTSTAMP_FILTER_SOME; 5295 config.rx_filter = HWTSTAMP_FILTER_SOME;
5012 break; 5296 break;
5013 case HWTSTAMP_FILTER_PTP_V2_EVENT: 5297 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5014 case HWTSTAMP_FILTER_PTP_V2_SYNC: 5298 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5015 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 5299 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5016 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; 5300 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5017 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 5301 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5018 is_l2 = 1; 5302 is_l2 = true;
5019 break; 5303 break;
5020 default: 5304 default:
5021 return -ERANGE; 5305 return -ERANGE;
5022 } 5306 }
5023 5307
5308 if (hw->mac.type == e1000_82575) {
5309 if (tsync_rx_ctl | tsync_tx_ctl)
5310 return -EINVAL;
5311 return 0;
5312 }
5313
5024 /* enable/disable TX */ 5314 /* enable/disable TX */
5025 regval = rd32(E1000_TSYNCTXCTL); 5315 regval = rd32(E1000_TSYNCTXCTL);
5026 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; 5316 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5317 regval |= tsync_tx_ctl;
5027 wr32(E1000_TSYNCTXCTL, regval); 5318 wr32(E1000_TSYNCTXCTL, regval);
5028 5319
5029 /* enable/disable RX, define which PTP packets are time stamped */ 5320 /* enable/disable RX */
5030 regval = rd32(E1000_TSYNCRXCTL); 5321 regval = rd32(E1000_TSYNCRXCTL);
5031 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; 5322 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5032 regval = (regval & ~0xE) | tsync_rx_ctl_type; 5323 regval |= tsync_rx_ctl;
5033 wr32(E1000_TSYNCRXCTL, regval); 5324 wr32(E1000_TSYNCRXCTL, regval);
5034 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5035 5325
5036 /* 5326 /* define which PTP packets are time stamped */
5037 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 5327 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5038 * (Ethertype to filter on)
5039 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5040 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5041 */
5042 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5043
5044 /* L4 Queue Filter[0]: only filter by source and destination port */
5045 wr32(E1000_SPQF0, htons(port));
5046 wr32(E1000_IMIREXT(0), is_l4 ?
5047 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5048 wr32(E1000_IMIR(0), is_l4 ?
5049 (htons(port)
5050 | (0<<16) /* immediate interrupt disabled */
5051 | 0 /* (1<<17) bit cleared: do not bypass
5052 destination port check */)
5053 : 0);
5054 wr32(E1000_FTQF0, is_l4 ?
5055 (0x11 /* UDP */
5056 | (1<<15) /* VF not compared */
5057 | (1<<27) /* Enable Timestamping */
5058 | (7<<28) /* only source port filter enabled,
5059 source/target address and protocol
5060 masked */)
5061 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5062 enabled */));
5063 5328
5329 /* define ethertype filter for timestamped packets */
5330 if (is_l2)
5331 wr32(E1000_ETQF(3),
5332 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5333 E1000_ETQF_1588 | /* enable timestamping */
5334 ETH_P_1588)); /* 1588 eth protocol type */
5335 else
5336 wr32(E1000_ETQF(3), 0);
5337
5338#define PTP_PORT 319
5339 /* L4 Queue Filter[3]: filter by destination port and protocol */
5340 if (is_l4) {
5341 u32 ftqf = (IPPROTO_UDP /* UDP */
5342 | E1000_FTQF_VF_BP /* VF not compared */
5343 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5344 | E1000_FTQF_MASK); /* mask all inputs */
5345 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5346
5347 wr32(E1000_IMIR(3), htons(PTP_PORT));
5348 wr32(E1000_IMIREXT(3),
5349 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5350 if (hw->mac.type == e1000_82576) {
5351 /* enable source port check */
5352 wr32(E1000_SPQF(3), htons(PTP_PORT));
5353 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5354 }
5355 wr32(E1000_FTQF(3), ftqf);
5356 } else {
5357 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5358 }
5064 wrfl(); 5359 wrfl();
5065 5360
5066 adapter->hwtstamp_config = config; 5361 adapter->hwtstamp_config = config;
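
Aside, not part of the patch: the code above is reached through the SIOCSHWTSTAMP ioctl. A hedged userspace sketch of enabling hardware timestamping on an igb interface (standard uapi names from linux/net_tstamp.h and linux/sockios.h; error handling omitted):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <string.h>

	static int enable_hwtstamp(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* lands in igb_hwtstamp_ioctl(); as seen above, the driver may
		 * rewrite cfg.rx_filter (e.g. to HWTSTAMP_FILTER_SOME) */
		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}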
@@ -5137,21 +5432,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
5137 ctrl |= E1000_CTRL_VME; 5432 ctrl |= E1000_CTRL_VME;
5138 wr32(E1000_CTRL, ctrl); 5433 wr32(E1000_CTRL, ctrl);
5139 5434
5140 /* enable VLAN receive filtering */ 5435 /* Disable CFI check */
5141 rctl = rd32(E1000_RCTL); 5436 rctl = rd32(E1000_RCTL);
5142 rctl &= ~E1000_RCTL_CFIEN; 5437 rctl &= ~E1000_RCTL_CFIEN;
5143 wr32(E1000_RCTL, rctl); 5438 wr32(E1000_RCTL, rctl);
5144 igb_update_mng_vlan(adapter);
5145 } else { 5439 } else {
5146 /* disable VLAN tag insert/strip */ 5440 /* disable VLAN tag insert/strip */
5147 ctrl = rd32(E1000_CTRL); 5441 ctrl = rd32(E1000_CTRL);
5148 ctrl &= ~E1000_CTRL_VME; 5442 ctrl &= ~E1000_CTRL_VME;
5149 wr32(E1000_CTRL, ctrl); 5443 wr32(E1000_CTRL, ctrl);
5150
5151 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5152 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5153 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5154 }
5155 } 5444 }
5156 5445
5157 igb_rlpml_set(adapter); 5446 igb_rlpml_set(adapter);
@@ -5166,16 +5455,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5166 struct e1000_hw *hw = &adapter->hw; 5455 struct e1000_hw *hw = &adapter->hw;
5167 int pf_id = adapter->vfs_allocated_count; 5456 int pf_id = adapter->vfs_allocated_count;
5168 5457
5169 if ((hw->mng_cookie.status & 5458 /* attempt to add filter to vlvf array */
5170 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5459 igb_vlvf_set(adapter, vid, true, pf_id);
5171 (vid == adapter->mng_vlan_id))
5172 return;
5173
5174 /* add vid to vlvf if sr-iov is enabled,
5175 * if that fails add directly to filter table */
5176 if (igb_vlvf_set(adapter, vid, true, pf_id))
5177 igb_vfta_set(hw, vid, true);
5178 5460
5461 /* add the filter since PF can receive vlans w/o entry in vlvf */
5462 igb_vfta_set(hw, vid, true);
5179} 5463}
5180 5464
5181static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 5465static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5183,6 +5467,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5183 struct igb_adapter *adapter = netdev_priv(netdev); 5467 struct igb_adapter *adapter = netdev_priv(netdev);
5184 struct e1000_hw *hw = &adapter->hw; 5468 struct e1000_hw *hw = &adapter->hw;
5185 int pf_id = adapter->vfs_allocated_count; 5469 int pf_id = adapter->vfs_allocated_count;
5470 s32 err;
5186 5471
5187 igb_irq_disable(adapter); 5472 igb_irq_disable(adapter);
5188 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5473 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5190,17 +5475,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5190 if (!test_bit(__IGB_DOWN, &adapter->state)) 5475 if (!test_bit(__IGB_DOWN, &adapter->state))
5191 igb_irq_enable(adapter); 5476 igb_irq_enable(adapter);
5192 5477
5193 if ((adapter->hw.mng_cookie.status & 5478 /* remove vlan from VLVF table array */
5194 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5479 err = igb_vlvf_set(adapter, vid, false, pf_id);
5195 (vid == adapter->mng_vlan_id)) {
5196 /* release control to f/w */
5197 igb_release_hw_control(adapter);
5198 return;
5199 }
5200 5480
5201 /* remove vid from vlvf if sr-iov is enabled, 5481 /* if vid was not present in VLVF just remove it from table */
5202 * if not in vlvf remove from vfta */ 5482 if (err)
5203 if (igb_vlvf_set(adapter, vid, false, pf_id))
5204 igb_vfta_set(hw, vid, false); 5483 igb_vfta_set(hw, vid, false);
5205} 5484}
5206 5485
@@ -5220,6 +5499,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
5220 5499
5221int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 5500int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5222{ 5501{
5502 struct pci_dev *pdev = adapter->pdev;
5223 struct e1000_mac_info *mac = &adapter->hw.mac; 5503 struct e1000_mac_info *mac = &adapter->hw.mac;
5224 5504
5225 mac->autoneg = 0; 5505 mac->autoneg = 0;
@@ -5243,8 +5523,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5243 break; 5523 break;
5244 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5524 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5245 default: 5525 default:
5246 dev_err(&adapter->pdev->dev, 5526 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5247 "Unsupported Speed/Duplex configuration\n");
5248 return -EINVAL; 5527 return -EINVAL;
5249 } 5528 }
5250 return 0; 5529 return 0;
@@ -5266,9 +5545,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5266 if (netif_running(netdev)) 5545 if (netif_running(netdev))
5267 igb_close(netdev); 5546 igb_close(netdev);
5268 5547
5269 igb_reset_interrupt_capability(adapter); 5548 igb_clear_interrupt_scheme(adapter);
5270
5271 igb_free_queues(adapter);
5272 5549
5273#ifdef CONFIG_PM 5550#ifdef CONFIG_PM
5274 retval = pci_save_state(pdev); 5551 retval = pci_save_state(pdev);
@@ -5300,7 +5577,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5300 wr32(E1000_CTRL, ctrl); 5577 wr32(E1000_CTRL, ctrl);
5301 5578
5302 /* Allow time for pending master requests to run */ 5579 /* Allow time for pending master requests to run */
5303 igb_disable_pcie_master(&adapter->hw); 5580 igb_disable_pcie_master(hw);
5304 5581
5305 wr32(E1000_WUC, E1000_WUC_PME_EN); 5582 wr32(E1000_WUC, E1000_WUC_PME_EN);
5306 wr32(E1000_WUFC, wufc); 5583 wr32(E1000_WUFC, wufc);
@@ -5363,9 +5640,7 @@ static int igb_resume(struct pci_dev *pdev)
5363 pci_enable_wake(pdev, PCI_D3hot, 0); 5640 pci_enable_wake(pdev, PCI_D3hot, 0);
5364 pci_enable_wake(pdev, PCI_D3cold, 0); 5641 pci_enable_wake(pdev, PCI_D3cold, 0);
5365 5642
5366 igb_set_interrupt_capability(adapter); 5643 if (igb_init_interrupt_scheme(adapter)) {
5367
5368 if (igb_alloc_queues(adapter)) {
5369 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5644 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5370 return -ENOMEM; 5645 return -ENOMEM;
5371 } 5646 }
@@ -5417,22 +5692,16 @@ static void igb_netpoll(struct net_device *netdev)
5417 int i; 5692 int i;
5418 5693
5419 if (!adapter->msix_entries) { 5694 if (!adapter->msix_entries) {
5695 struct igb_q_vector *q_vector = adapter->q_vector[0];
5420 igb_irq_disable(adapter); 5696 igb_irq_disable(adapter);
5421 napi_schedule(&adapter->rx_ring[0].napi); 5697 napi_schedule(&q_vector->napi);
5422 return; 5698 return;
5423 } 5699 }
5424 5700
5425 for (i = 0; i < adapter->num_tx_queues; i++) { 5701 for (i = 0; i < adapter->num_q_vectors; i++) {
5426 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 5702 struct igb_q_vector *q_vector = adapter->q_vector[i];
5427 wr32(E1000_EIMC, tx_ring->eims_value); 5703 wr32(E1000_EIMC, q_vector->eims_value);
5428 igb_clean_tx_irq(tx_ring); 5704 napi_schedule(&q_vector->napi);
5429 wr32(E1000_EIMS, tx_ring->eims_value);
5430 }
5431
5432 for (i = 0; i < adapter->num_rx_queues; i++) {
5433 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5434 wr32(E1000_EIMC, rx_ring->eims_value);
5435 napi_schedule(&rx_ring->napi);
5436 } 5705 }
5437} 5706}
5438#endif /* CONFIG_NET_POLL_CONTROLLER */ 5707#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5532,6 +5801,33 @@ static void igb_io_resume(struct pci_dev *pdev)
5532 igb_get_hw_control(adapter); 5801 igb_get_hw_control(adapter);
5533} 5802}
5534 5803
5804static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5805 u8 qsel)
5806{
5807 u32 rar_low, rar_high;
5808 struct e1000_hw *hw = &adapter->hw;
5809
5810 /* HW expects these in little endian so we reverse the byte order
5811 * from network order (big endian) to little endian
5812 */
5813 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5814 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5815 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5816
5817 /* Indicate to hardware the Address is Valid. */
5818 rar_high |= E1000_RAH_AV;
5819
5820 if (hw->mac.type == e1000_82575)
5821 rar_high |= E1000_RAH_POOL_1 * qsel;
5822 else
5823 rar_high |= E1000_RAH_POOL_1 << qsel;
5824
5825 wr32(E1000_RAL(index), rar_low);
5826 wrfl();
5827 wr32(E1000_RAH(index), rar_high);
5828 wrfl();
5829}
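
Aside, not part of the patch: a worked example of the packing above, using the illustrative address 00:1b:21:aa:bb:cc:

	/* addr[] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc }                      */
	/* rar_low  = 0x00 | 0x1b << 8 | 0x21 << 16 | 0xaa << 24 = 0xaa211b00   */
	/* rar_high = 0xbb | 0xcc << 8                            = 0x0000ccbb   */
	/* rar_high |= E1000_RAH_AV marks the entry valid; the E1000_RAH_POOL_1
	 * based bits steer frames matching this address to pool/queue-set 'qsel'. */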
5830
5535static int igb_set_vf_mac(struct igb_adapter *adapter, 5831static int igb_set_vf_mac(struct igb_adapter *adapter,
5536 int vf, unsigned char *mac_addr) 5832 int vf, unsigned char *mac_addr)
5537{ 5833{
@@ -5542,8 +5838,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5542 5838
5543 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 5839 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5544 5840
5545 igb_rar_set(hw, mac_addr, rar_entry); 5841 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5546 igb_set_rah_pool(hw, vf, rar_entry);
5547 5842
5548 return 0; 5843 return 0;
5549} 5844}
@@ -5551,19 +5846,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5551static void igb_vmm_control(struct igb_adapter *adapter) 5846static void igb_vmm_control(struct igb_adapter *adapter)
5552{ 5847{
5553 struct e1000_hw *hw = &adapter->hw; 5848 struct e1000_hw *hw = &adapter->hw;
5554 u32 reg_data; 5849 u32 reg;
5555 5850
5556 if (!adapter->vfs_allocated_count) 5851 /* replication is not supported for 82575 */
5852 if (hw->mac.type == e1000_82575)
5557 return; 5853 return;
5558 5854
5559 /* VF's need PF reset indication before they 5855 /* enable replication vlan tag stripping */
5560 * can send/receive mail */ 5856 reg = rd32(E1000_RPLOLR);
5561 reg_data = rd32(E1000_CTRL_EXT); 5857 reg |= E1000_RPLOLR_STRVLAN;
5562 reg_data |= E1000_CTRL_EXT_PFRSTD; 5858 wr32(E1000_RPLOLR, reg);
5563 wr32(E1000_CTRL_EXT, reg_data);
5564 5859
5565 igb_vmdq_set_loopback_pf(hw, true); 5860 /* notify HW that the MAC is adding vlan tags */
5566 igb_vmdq_set_replication_pf(hw, true); 5861 reg = rd32(E1000_DTXCTL);
5862 reg |= E1000_DTXCTL_VLAN_ADDED;
5863 wr32(E1000_DTXCTL, reg);
5864
5865 if (adapter->vfs_allocated_count) {
5866 igb_vmdq_set_loopback_pf(hw, true);
5867 igb_vmdq_set_replication_pf(hw, true);
5868 } else {
5869 igb_vmdq_set_loopback_pf(hw, false);
5870 igb_vmdq_set_replication_pf(hw, false);
5871 }
5567} 5872}
5568 5873
5569/* igb_main.c */ 5874/* igb_main.c */