Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h         |  10
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c   | 119
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  |  16
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h  |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 160
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c    |  40
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c    | 672
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c     |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c   | 137
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h   |   8
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h    |  12
11 files changed, 933 insertions(+), 250 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee155a2..d0ea3d6dea95 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
 	u16 default_vf_vlan_id;
 	u16 vlans_enabled;
 	bool clear_to_send;
+	bool pf_set_mac;
 	int rar;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 pf_qos;
 };
 
 /* wrapper around a pointer to a socket buffer,
@@ -204,14 +207,17 @@ enum ixgbe_ring_f_enum {
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES  8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
 } ____cacheline_internodealigned_in_smp;
 
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
-
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
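
Annotation (not part of the patch): moving MAX_RX_QUEUES/MAX_TX_QUEUES inside
the IXGBE_FCOE conditional ties the compile-time ring-array bounds to the
features actually built in, instead of a fixed 128. A sketch of the values
this hunk implies, using the constants defined just above it:

	/* sketch only: effective bounds after this hunk */
	#ifdef IXGBE_FCOE
	/* MAX_RX_QUEUES == MAX_TX_QUEUES == 64 + 8 == 72 */
	#else
	/* MAX_RX_QUEUES == MAX_TX_QUEUES == 64 */
	#endif
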
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 1f30e163bd9c..38c384031c4c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,9 @@
 #define IXGBE_82599_MC_TBL_SIZE   128
 #define IXGBE_82599_VFT_TBL_SIZE  128
 
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed,
                                           bool autoneg,
@@ -68,7 +71,15 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 	if (hw->phy.multispeed_fiber) {
 		/* Set up dual speed SFP+ support */
 		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+		mac->ops.disable_tx_laser =
+		                       &ixgbe_disable_tx_laser_multispeed_fiber;
+		mac->ops.enable_tx_laser =
+		                        &ixgbe_enable_tx_laser_multispeed_fiber;
+		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
 	} else {
+		mac->ops.disable_tx_laser = NULL;
+		mac->ops.enable_tx_laser = NULL;
+		mac->ops.flap_tx_laser = NULL;
 		if ((mac->ops.get_media_type(hw) ==
 		     ixgbe_media_type_backplane) &&
 		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -412,6 +423,67 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 	return status;
 }
 
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	/* Disable tx laser; allow 100us to go dark per spec */
+	esdp_reg |= IXGBE_ESDP_SDP3;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+	IXGBE_WRITE_FLUSH(hw);
+	udelay(100);
+}
+
+/**
+ *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively turning on the Tx
+ *  laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+	/* Enable tx laser; allow 100ms to light up */
+	esdp_reg &= ~IXGBE_ESDP_SDP3;
+	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+	IXGBE_WRITE_FLUSH(hw);
+	msleep(100);
+}
+
+/**
+ *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support,
+ *  it sets autotry_restart to true to indicate that we need to
+ *  initiate a new autotry session with the link partner.  To do
+ *  so, we set the speed then disable and re-enable the tx laser, to
+ *  alert the link partner that it also needs to restart autotry on its
+ *  end.  This is consistent with true clause 37 autoneg, which also
+ *  involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+	hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
+
+	if (hw->mac.autotry_restart) {
+		ixgbe_disable_tx_laser_multispeed_fiber(hw);
+		ixgbe_enable_tx_laser_multispeed_fiber(hw);
+		hw->mac.autotry_restart = false;
+	}
+}
+
 /**
  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
@@ -440,16 +512,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 	speed &= phy_link_speed;
 
 	/*
-	 * When the driver changes the link speeds that it can support,
-	 * it sets autotry_restart to true to indicate that we need to
-	 * initiate a new autotry session with the link partner.  To do
-	 * so, we set the speed then disable and re-enable the tx laser, to
-	 * alert the link partner that it also needs to restart autotry on its
-	 * end.  This is consistent with true clause 37 autoneg, which also
-	 * involves a loss of signal.
-	 */
-
-	/*
 	 * Try each speed one by one, highest priority first.  We do this in
 	 * software because 10gb fiber doesn't support speed autonegotiation.
 	 */
@@ -466,6 +528,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 		/* Set the module link speed */
 		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+		IXGBE_WRITE_FLUSH(hw);
 
 		/* Allow module to change analog characteristics (1G->10G) */
 		msleep(40);
@@ -478,19 +541,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 			return status;
 
 		/* Flap the tx laser if it has not already been done */
-		if (hw->mac.autotry_restart) {
-			/* Disable tx laser; allow 100us to go dark per spec */
-			esdp_reg |= IXGBE_ESDP_SDP3;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			udelay(100);
-
-			/* Enable tx laser; allow 2ms to light up per spec */
-			esdp_reg &= ~IXGBE_ESDP_SDP3;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			msleep(2);
-
-			hw->mac.autotry_restart = false;
-		}
+		hw->mac.ops.flap_tx_laser(hw);
 
 		/*
 		 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
@@ -525,6 +576,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 		esdp_reg &= ~IXGBE_ESDP_SDP5;
 		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
 		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+		IXGBE_WRITE_FLUSH(hw);
 
 		/* Allow module to change analog characteristics (10G->1G) */
 		msleep(40);
@@ -537,19 +589,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 			return status;
 
 		/* Flap the tx laser if it has not already been done */
-		if (hw->mac.autotry_restart) {
-			/* Disable tx laser; allow 100us to go dark per spec */
-			esdp_reg |= IXGBE_ESDP_SDP3;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			udelay(100);
-
-			/* Enable tx laser; allow 2ms to light up per spec */
-			esdp_reg &= ~IXGBE_ESDP_SDP3;
-			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-			msleep(2);
-
-			hw->mac.autotry_restart = false;
-		}
+		hw->mac.ops.flap_tx_laser(hw);
 
 		/* Wait for the link partner to also set speed */
 		msleep(100);
@@ -602,6 +642,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 	s32 i, j;
 	bool link_up = false;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	struct ixgbe_adapter *adapter = hw->back;
 
 	hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
 
@@ -686,6 +727,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 						      autoneg_wait_to_complete);
 
 out:
+	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+		netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
+			" downgraded the link speed from the maximum"
+			" advertised\n");
 	return status;
 }
 
@@ -1263,7 +1308,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
 		hw_dbg(hw ,"Flow Director previous command isn't complete, "
-		       "aborting table re-initialization. \n");
+		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
 
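
Annotation (not part of the patch): the laser control now sits behind
mac.ops function pointers, so only multispeed-fiber parts get real
implementations and every other media type sees NULL. A minimal sketch,
within the driver context, of how a speed-change path exercises the hook
(the NULL guard is the sketch's own; the patch's fiber-only path calls it
directly):

	/* restart the autotry session only where a laser hook exists */
	if (hw->mac.ops.flap_tx_laser)
		hw->mac.ops.flap_tx_laser(hw);
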
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..6eb5814ca7da 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1484,26 +1484,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
 /**
  *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
  *  @hw: pointer to hardware structure
- *  @mc_addr_list: the list of new multicast addresses
- *  @mc_addr_count: number of addresses
- *  @next: iterator function to walk the multicast address list
+ *  @netdev: pointer to net device structure
  *
  *  The given list replaces any existing list. Clears the MC addrs from receive
  *  address registers and the multicast table. Uses unused receive address
  *  registers for the first multicast addresses, and hashes the rest into the
  *  multicast table.
  **/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-                                      u32 mc_addr_count, ixgbe_mc_addr_itr next)
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+                                      struct net_device *netdev)
 {
+	struct netdev_hw_addr *ha;
 	u32 i;
-	u32 vmdq;
 
 	/*
 	 * Set the new number of MC addresses that we are being requested to
 	 * use.
 	 */
-	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
 	hw->addr_ctrl.mta_in_use = 0;
 
 	/* Clear the MTA */
@@ -1512,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
 
 	/* Add the new addresses */
-	for (i = 0; i < mc_addr_count; i++) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		hw_dbg(hw, " Adding the multicast addresses:\n");
-		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+		ixgbe_set_mta(hw, ha->addr);
 	}
 
 	/* Enable mta */
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..264eef575cd6 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -56,9 +56,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr);
 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
-                                      u32 mc_addr_count,
-                                      ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+                                      struct net_device *netdev);
 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
                                       struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
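
Annotation (not part of the patch): the call-site shape this signature
change implies, taken from the ixgbe_main.c hunk later in this commit:

	/* old: flattened list plus an iterator callback */
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
					ixgbe_addr_list_itr);

	/* new: hand over the net_device and let the MAC layer walk it */
	hw->mac.ops.update_mc_addr_list(hw, netdev);
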
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a446e4c7..dc7fd5b70bc3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -29,6 +29,7 @@
 
 #include <linux/types.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
@@ -364,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 	else
 		fc.disable_fc_autoneg = false;
 
-	if (pause->rx_pause && pause->tx_pause)
+	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
 		fc.requested_mode = ixgbe_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
 		fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1457,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 		struct ixgbe_tx_buffer *buf =
 				&(tx_ring->tx_buffer_info[i]);
 		if (buf->dma)
-			pci_unmap_single(pdev, buf->dma, buf->length,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(&pdev->dev, buf->dma,
+					 buf->length, DMA_TO_DEVICE);
 		if (buf->skb)
 			dev_kfree_skb(buf->skb);
 	}
@@ -1469,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 		struct ixgbe_rx_buffer *buf =
 				&(rx_ring->rx_buffer_info[i]);
 		if (buf->dma)
-			pci_unmap_single(pdev, buf->dma,
-					 IXGBE_RXBUFFER_2048,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&pdev->dev, buf->dma,
+					 IXGBE_RXBUFFER_2048,
+					 DMA_FROM_DEVICE);
 		if (buf->skb)
 			dev_kfree_skb(buf->skb);
 	}
 }
 
 if (tx_ring->desc) {
-	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
-			    tx_ring->dma);
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
 	tx_ring->desc = NULL;
 }
 if (rx_ring->desc) {
-	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
-			    rx_ring->dma);
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
 	rx_ring->desc = NULL;
 }
 
@@ -1519,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-						   &tx_ring->dma))) {
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!(tx_ring->desc)) {
 		ret_val = 2;
 		goto err_nomem;
 	}
@@ -1562,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		tx_ring->tx_buffer_info[i].skb = skb;
 		tx_ring->tx_buffer_info[i].length = skb->len;
 		tx_ring->tx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, skb->len,
-				       PCI_DMA_TODEVICE);
+			dma_map_single(&pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
 		desc->read.buffer_addr =
 			cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
 		desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1592,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-						   &rx_ring->dma))) {
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	if (!(rx_ring->desc)) {
 		ret_val = 5;
 		goto err_nomem;
 	}
@@ -1660,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_ring->rx_buffer_info[i].skb = skb;
 		rx_ring->rx_buffer_info[i].dma =
-			pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
-				       PCI_DMA_FROMDEVICE);
+			dma_map_single(&pdev->dev, skb->data,
+				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
 		rx_desc->read.pkt_addr =
 			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
 		memset(skb->data, 0x00, skb->len);
@@ -1774,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 			ixgbe_create_lbtest_frame(
 					tx_ring->tx_buffer_info[k].skb,
 					1024);
-			pci_dma_sync_single_for_device(pdev,
+			dma_sync_single_for_device(&pdev->dev,
 				tx_ring->tx_buffer_info[k].dma,
 				tx_ring->tx_buffer_info[k].length,
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 			if (unlikely(++k == tx_ring->count))
 				k = 0;
 		}
@@ -1788,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 		good_cnt = 0;
 		do {
 			/* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
+			dma_sync_single_for_cpu(&pdev->dev,
 				rx_ring->rx_buffer_info[l].dma,
 				IXGBE_RXBUFFER_2048,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 			ret_val = ixgbe_check_lbtest_frame(
 					rx_ring->rx_buffer_info[l].skb, 1024);
 			if (!ret_val)
@@ -1853,6 +1856,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
 	if (ixgbe_link_test(adapter, &data[4]))
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		int i;
+		for (i = 0; i < adapter->num_vfs; i++) {
+			if (adapter->vfinfo[i].clear_to_send) {
+				netdev_warn(netdev, "%s",
+					    "offline diagnostic is not "
+					    "supported when VFs are "
+					    "present\n");
+				data[0] = 1;
+				data[1] = 1;
+				data[2] = 1;
+				data[3] = 1;
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+				clear_bit(__IXGBE_TESTING,
+					  &adapter->state);
+				goto skip_ol_tests;
+			}
+		}
+	}
+
 	if (if_running)
 		/* indicate we're in test mode */
 		dev_close(netdev);
@@ -1908,6 +1931,7 @@ skip_loopback:
 
 		clear_bit(__IXGBE_TESTING, &adapter->state);
 	}
+skip_ol_tests:
 	msleep_interruptible(4 * 1000);
 }
 
@@ -2057,12 +2081,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 	return 0;
 }
 
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
+			       struct ethtool_coalesce *ec)
+{
+	/* check the old value and enable RSC if necessary */
+	if ((adapter->rx_itr_setting == 0) &&
+	    (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+		adapter->netdev->features |= NETIF_F_LRO;
+		DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
+			ec->rx_coalesce_usecs);
+		return true;
+	}
+	return false;
+}
+
 static int ixgbe_set_coalesce(struct net_device *netdev,
 			      struct ethtool_coalesce *ec)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_q_vector *q_vector;
 	int i;
+	bool need_reset = false;
 
 	/* don't accept tx specific changes if we've got mixed RxTx vectors */
 	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2073,11 +2117,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
+		u32 max_int;
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			max_int = IXGBE_MAX_RSC_INT_RATE;
+		else
+			max_int = IXGBE_MAX_INT_RATE;
+
 		/* check the limits */
-		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+		if ((1000000/ec->rx_coalesce_usecs > max_int) ||
 		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
 			return -EINVAL;
 
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_reenable_rsc(adapter, ec);
+
 		/* store the value in ints/second */
 		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
@@ -2086,6 +2139,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		/* clear the lower bit as its used for dynamic state */
 		adapter->rx_itr_setting &= ~1;
 	} else if (ec->rx_coalesce_usecs == 1) {
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_reenable_rsc(adapter, ec);
+
 		/* 1 means dynamic mode */
 		adapter->rx_eitr_param = 20000;
 		adapter->rx_itr_setting = 1;
@@ -2094,14 +2150,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		 * any other value means disable eitr, which is best
 		 * served by setting the interrupt rate very high
 		 */
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-			adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
-		else
-			adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
 		adapter->rx_itr_setting = 0;
+
+		/*
+		 * if hardware RSC is enabled, disable it when
+		 * setting low latency mode, to avoid errata, assuming
+		 * that when the user set low latency mode they want
+		 * it at the cost of anything else
+		 */
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+			netdev->features &= ~NETIF_F_LRO;
+			DPRINTK(PROBE, INFO,
+				"rx-usecs set to 0, disabling RSC\n");
+
+			need_reset = true;
+		}
 	}
 
 	if (ec->tx_coalesce_usecs > 1) {
+		/*
+		 * don't have to worry about max_int as above because
+		 * tx vectors don't do hardware RSC (an rx function)
+		 */
 		/* check the limits */
 		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
 		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2145,6 +2217,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		ixgbe_write_eitr(q_vector);
 	}
 
+	/*
+	 * do reset here at the end to make sure EITR==0 case is handled
+	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
+	 * also locks in RSC enable/disable which requires reset
+	 */
+	if (need_reset) {
+		if (netif_running(netdev))
+			ixgbe_reinit_locked(adapter);
+		else
+			ixgbe_reset(adapter);
+	}
+
 	return 0;
 }
 
@@ -2156,10 +2240,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 	ethtool_op_set_flags(netdev, data);
 
 	/* if state changes we need to update adapter->flags and reset */
-	if ((!!(data & ETH_FLAG_LRO)) !=
-	    (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
-		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
-		need_reset = true;
+	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
+		/*
+		 * cast both to bool and verify if they are set the same
+		 * but only enable RSC if itr is non-zero, as
+		 * itr=0 and RSC are mutually exclusive
+		 */
+		if (((!!(data & ETH_FLAG_LRO)) !=
+		     (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
+		    adapter->rx_itr_setting) {
+			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+			switch (adapter->hw.mac.type) {
+			case ixgbe_mac_82599EB:
+				need_reset = true;
+				break;
+			default:
+				break;
+			}
+		} else if (!adapter->rx_itr_setting) {
+			netdev->features &= ~ETH_FLAG_LRO;
+		}
 	}
 
 	/*
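
Annotation (not part of the patch): the rx-usecs handling above splits into
three regimes. A condensed, illustrative helper (not in the driver) showing
the mapping with the driver's own constants:

	static void sketch_rx_coalesce(struct ixgbe_adapter *adapter, u32 usecs)
	{
		if (usecs > 1)		/* fixed rate: EITR = 10^6/usecs ints/s */
			adapter->rx_eitr_param = 1000000 / usecs;
		else if (usecs == 1)	/* dynamic ITR, seeded at 20000 ints/s */
			adapter->rx_eitr_param = 20000;
		else			/* 0: lowest latency; RSC must be off */
			adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
	}
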
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 4123dec0dfb7..6493049b663d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -31,6 +31,7 @@
 #include "ixgbe_dcb_82599.h"
 #endif /* CONFIG_IXGBE_DCB */
 #include <linux/if_ether.h>
+#include <linux/gfp.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/fc/fc_fs.h>
@@ -202,6 +203,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 		addr = sg_dma_address(sg);
 		len = sg_dma_len(sg);
 		while (len) {
+			/* max number of buffers allowed in one DDP context */
+			if (j >= IXGBE_BUFFCNT_MAX) {
+				netif_err(adapter, drv, adapter->netdev,
+					  "xid=%x:%d,%d,%d:addr=%llx "
+					  "not enough descriptors\n",
+					  xid, i, j, dmacount, (u64)addr);
+				goto out_noddp_free;
+			}
+
 			/* get the offset of length of current buffer */
 			thisoff = addr & ((dma_addr_t)bufflen - 1);
 			thislen = min((bufflen - thisoff), len);
@@ -227,20 +237,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 			len -= thislen;
 			addr += thislen;
 			j++;
-			/* max number of buffers allowed in one DDP context */
-			if (j > IXGBE_BUFFCNT_MAX) {
-				DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
-					"not enough descriptors\n",
-					xid, i, j, dmacount, (u64)addr);
-				goto out_noddp_free;
-			}
 		}
 	}
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
-	fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
 	fcbuff |= (IXGBE_FCBUFF_VALID);
 
@@ -520,6 +523,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	/* Enable L2 eth type filter for FCoE */
 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
 			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+	/* Enable L2 eth type filter for FIP */
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
 	if (adapter->ring_feature[RING_F_FCOE].indices) {
 		/* Use multiple rx queues for FCoE by redirection table */
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +536,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+		fcoe_i = f->mask;
+		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+				IXGBE_ETQS_QUEUE_EN |
+				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	} else {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
@@ -539,6 +551,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 				IXGBE_ETQS_QUEUE_EN |
 				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	}
+	/* send FIP frames to the first FCoE queue */
+	fcoe_i = f->mask;
+	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+			IXGBE_ETQS_QUEUE_EN |
+			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
 			IXGBE_FCRXCTRL_FCOELLI |
@@ -614,9 +632,9 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 	netdev->vlan_features |= NETIF_F_FSO;
 	netdev->vlan_features |= NETIF_F_FCOE_MTU;
 	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
-	netdev_features_change(netdev);
 
 	ixgbe_init_interrupt_scheme(adapter);
+	netdev_features_change(netdev);
 
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
@@ -660,11 +678,11 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
 	netdev->vlan_features &= ~NETIF_F_FSO;
 	netdev->vlan_features &= ~NETIF_F_FCOE_MTU;
 	netdev->fcoe_ddp_xid = 0;
-	netdev_features_change(netdev);
 
 	ixgbe_cleanup_fcoe(adapter);
-
 	ixgbe_init_interrupt_scheme(adapter);
+	netdev_features_change(netdev);
+
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
 	rc = 0;
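
Annotation (not part of the patch): the DDP change above is an off-by-one
fix as much as a relocation: the old loop filled descriptor j and only then
tested "j > IXGBE_BUFFCNT_MAX", so slot IXGBE_BUFFCNT_MAX itself could be
written. Checking before the write keeps j a valid index. Sketch of the
corrected shape:

	while (len) {
		/* 0 .. IXGBE_BUFFCNT_MAX-1 are the only valid slots */
		if (j >= IXGBE_BUFFCNT_MAX)
			goto out_noddp_free;
		/* ... fill user descriptor j, then ... */
		j++;
	}
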
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 45e3532b166f..d1a1868df817 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
+#include <linux/slab.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
@@ -174,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 }
 
+struct ixgbe_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
+
+	/* General Registers */
+	{IXGBE_CTRL, "CTRL"},
+	{IXGBE_STATUS, "STATUS"},
+	{IXGBE_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{IXGBE_EICR, "EICR"},
+
+	/* RX Registers */
+	{IXGBE_SRRCTL(0), "SRRCTL"},
+	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
+	{IXGBE_RDLEN(0), "RDLEN"},
+	{IXGBE_RDH(0), "RDH"},
+	{IXGBE_RDT(0), "RDT"},
+	{IXGBE_RXDCTL(0), "RXDCTL"},
+	{IXGBE_RDBAL(0), "RDBAL"},
+	{IXGBE_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{IXGBE_TDBAL(0), "TDBAL"},
+	{IXGBE_TDBAH(0), "TDBAH"},
+	{IXGBE_TDLEN(0), "TDLEN"},
+	{IXGBE_TDH(0), "TDH"},
+	{IXGBE_TDT(0), "TDT"},
+	{IXGBE_TXDCTL(0), "TXDCTL"},
+
+	/* List Terminator */
+	{}
+};
+
+
+/*
+ * ixgbe_regdump - register printout routine
+ */
+static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
+{
+	int i = 0, j = 0;
+	char rname[16];
+	u32 regs[64];
+
+	switch (reginfo->ofs) {
+	case IXGBE_SRRCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		break;
+	case IXGBE_DCA_RXCTRL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+		break;
+	case IXGBE_RDLEN(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
+		break;
+	case IXGBE_RDH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
+		break;
+	case IXGBE_RDT(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
+		break;
+	case IXGBE_RXDCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+		break;
+	case IXGBE_RDBAL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
+		break;
+	case IXGBE_RDBAH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
+		break;
+	case IXGBE_TDBAL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
+		break;
+	case IXGBE_TDBAH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
+		break;
+	case IXGBE_TDLEN(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
+		break;
+	case IXGBE_TDH(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
+		break;
+	case IXGBE_TDT(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
+		break;
+	case IXGBE_TXDCTL(0):
+		for (i = 0; i < 64; i++)
+			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n", reginfo->name,
+			IXGBE_READ_REG(hw, reginfo->ofs));
+		return;
+	}
+
+	for (i = 0; i < 8; i++) {
+		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
+		printk(KERN_ERR "%-15s ", rname);
+		for (j = 0; j < 8; j++)
+			printk(KERN_CONT "%08x ", regs[i*8+j]);
+		printk(KERN_CONT "\n");
+	}
+
+}
+
+/*
+ * ixgbe_dump - Print registers, tx-rings and rx-rings
+ */
+static void ixgbe_dump(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_reg_info *reginfo;
+	int n = 0;
+	struct ixgbe_ring *tx_ring;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct ixgbe_ring *rx_ring;
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+			"trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+			netdev->name,
+			netdev->state,
+			netdev->trans_start,
+			netdev->last_rx);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		ixgbe_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !netif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ] "
+		"leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		tx_buffer_info =
+			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			   (u64)tx_buffer_info->dma,
+			   tx_buffer_info->length,
+			   tx_buffer_info->next_to_watch,
+			   (u64)tx_buffer_info->time_stamp);
+	}
+
+	/* Print TX Rings */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
+	 *   +--------------------------------------------------------------+
+	 *   63       46 45    40 39 36 35 32 31   24 23  20 19            0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "T [desc]     [address 63:0  ] "
+			"[PlPOIdStDDt Ln] [bi->dma       ] "
+			"leng  ntw timestamp        bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
+				" %04X  %3X %016llX %p", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				(u64)tx_buffer_info->dma,
+				tx_buffer_info->length,
+				tx_buffer_info->next_to_watch,
+				(u64)tx_buffer_info->time_stamp,
+				tx_buffer_info->skb);
+			if (i == tx_ring->next_to_use &&
+				i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC/U\n");
+			else if (i == tx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == tx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+			if (netif_msg_pktdata(adapter) &&
+				tx_buffer_info->dma != 0)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS, 16, 1,
+					phys_to_virt(tx_buffer_info->dma),
+					tx_buffer_info->length, true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "%5d %5X %5X\n", n,
+			   rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
+	 *   | Checksum   Ident  |   |           |    | Type | Type |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		printk(KERN_INFO "------------------------------------\n");
+		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
+			"[ HeadBuf   DD] [bi->dma       ] [bi->skb] "
+			"<-- Adv Rx Read format\n");
+		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
+			"[vl er S cks ln] ---------------- [bi->skb] "
+			"<-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			rx_buffer_info = &rx_ring->rx_buffer_info[i];
+			rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & IXGBE_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+					"%016llX ---------------- %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					rx_buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+					"%016llX %016llX %p", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)rx_buffer_info->dma,
+					rx_buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter)) {
+					print_hex_dump(KERN_INFO, "",
+					   DUMP_PREFIX_ADDRESS, 16, 1,
+					   phys_to_virt(rx_buffer_info->dma),
+					   rx_ring->rx_buf_len, true);
+
+					if (rx_ring->rx_buf_len
+						< IXGBE_RXBUFFER_2048)
+						print_hex_dump(KERN_INFO, "",
+						  DUMP_PREFIX_ADDRESS, 16, 1,
+						  phys_to_virt(
+						    rx_buffer_info->page_dma +
+						    rx_buffer_info->page_offset
+						  ),
+						  PAGE_SIZE/2, true);
+				}
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+
+		}
+	}
+
+exit:
+	return;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
 	u32 ctrl_ext;
@@ -265,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			pci_unmap_page(adapter->pdev,
+			dma_unmap_page(&adapter->pdev->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
-				       PCI_DMA_TODEVICE);
+				       DMA_TO_DEVICE);
 		else
-			pci_unmap_single(adapter->pdev,
+			dma_unmap_single(&adapter->pdev->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 		tx_buffer_info->dma = 0;
 	}
 	if (tx_buffer_info->skb) {
@@ -720,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			bi->page_offset ^= (PAGE_SIZE / 2);
 		}
 
-		bi->page_dma = pci_map_page(pdev, bi->page,
+		bi->page_dma = dma_map_page(&pdev->dev, bi->page,
 					    bi->page_offset,
 					    (PAGE_SIZE / 2),
-					    PCI_DMA_FROMDEVICE);
+					    DMA_FROM_DEVICE);
 	}
 
 	if (!bi->skb) {
@@ -742,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 						  - skb->data));
 
 		bi->skb = skb;
-		bi->dma = pci_map_single(pdev, skb->data,
+		bi->dma = dma_map_single(&pdev->dev, skb->data,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 	}
 	/* Refresh the desc even if buffer_addrs didn't change because
 	 * each write-back erases this info. */
@@ -885,16 +1225,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			 */
 			IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 		else
-			pci_unmap_single(pdev, rx_buffer_info->dma,
+			dma_unmap_single(&pdev->dev,
+					 rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 		rx_buffer_info->dma = 0;
 		skb_put(skb, len);
 	}
 
 	if (upper_len) {
-		pci_unmap_page(pdev, rx_buffer_info->page_dma,
-			       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 		rx_buffer_info->page_dma = 0;
 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 				   rx_buffer_info->page,
@@ -935,10 +1276,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (skb->prev)
 			skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			if (IXGBE_RSC_CB(skb)->dma)
-				pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+			if (IXGBE_RSC_CB(skb)->dma) {
+				dma_unmap_single(&pdev->dev,
+						 IXGBE_RSC_CB(skb)->dma,
 						 rx_ring->rx_buf_len,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
+				IXGBE_RSC_CB(skb)->dma = 0;
+			}
 			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
 				rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
 			else
@@ -1050,7 +1394,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	 */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
 		q_vector = adapter->q_vector[v_idx];
-		/* XXX for_each_bit(...) */
+		/* XXX for_each_set_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
 				       adapter->num_rx_queues);
 
@@ -1187,6 +1531,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 		itr_reg |= (itr_reg << 16);
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		/*
+		 * 82599 can support a value of zero, so allow it for
+		 * max interrupt rate, but there is an errata where it can
+		 * not be zero with RSC
+		 */
+		if (itr_reg == 8 &&
+		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+			itr_reg = 0;
+
+		/*
 		 * set the WDIS bit to not clear the timer bits and cause an
 		 * immediate assertion of the interrupt
 		 */
@@ -2369,7 +2722,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
 		IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
-		ixgbe_set_vmolr(hw, adapter->num_vfs);
+		ixgbe_set_vmolr(hw, adapter->num_vfs, true);
 	}
 
 	/* Program MRQC for the distribution of queues */
@@ -2479,12 +2832,74 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
 }
 
+/**
+ * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	int i, j;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE);
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		break;
+	case ixgbe_mac_82599EB:
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			j = adapter->rx_ring[i]->reg_idx;
+			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+			vlnctrl &= ~IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
+ * @adapter: driver data
+ */
+static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	int i, j;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		break;
+	case ixgbe_mac_82599EB:
+		vlnctrl |= IXGBE_VLNCTRL_VFE;
+		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			j = adapter->rx_ring[i]->reg_idx;
+			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+			vlnctrl |= IXGBE_RXDCTL_VME;
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
 				   struct vlan_group *grp)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 ctrl;
-	int i, j;
 
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		ixgbe_irq_disable(adapter);
@@ -2495,25 +2910,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 	 * still receive traffic from a DCB-enabled host even if we're
 	 * not in DCB mode.
 	 */
-	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
-
-	/* Disable CFI check */
-	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-
-	/* enable VLAN tag stripping */
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		ctrl |= IXGBE_VLNCTRL_VME;
-	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			u32 ctrl;
-			j = adapter->rx_ring[i]->reg_idx;
-			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
-			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
-		}
-	}
-
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+	ixgbe_vlan_filter_enable(adapter);
 
 	ixgbe_vlan_rx_add_vid(netdev, 0);
 
@@ -2535,21 +2932,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2535 } 2932 }
2536} 2933}
2537 2934
2538static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2539{
2540 struct dev_mc_list *mc_ptr;
2541 u8 *addr = *mc_addr_ptr;
2542 *vmdq = 0;
2543
2544 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2545 if (mc_ptr->next)
2546 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2547 else
2548 *mc_addr_ptr = NULL;
2549
2550 return addr;
2551}
2552
2553/** 2935/**
2554 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2936 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2555 * @netdev: network interface device structure 2937 * @netdev: network interface device structure
@@ -2563,19 +2945,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2563{ 2945{
2564 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2946 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2565 struct ixgbe_hw *hw = &adapter->hw; 2947 struct ixgbe_hw *hw = &adapter->hw;
2566 u32 fctrl, vlnctrl; 2948 u32 fctrl;
2567 u8 *addr_list = NULL;
2568 int addr_count = 0;
2569 2949
2570 /* Check for Promiscuous and All Multicast modes */ 2950 /* Check for Promiscuous and All Multicast modes */
2571 2951
2572 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2952 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2573 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2574 2953
2575 if (netdev->flags & IFF_PROMISC) { 2954 if (netdev->flags & IFF_PROMISC) {
2576 hw->addr_ctrl.user_set_promisc = 1; 2955 hw->addr_ctrl.user_set_promisc = 1;
2577 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2956 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2578 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 2957 /* don't hardware filter vlans in promisc mode */
2958 ixgbe_vlan_filter_disable(adapter);
2579 } else { 2959 } else {
2580 if (netdev->flags & IFF_ALLMULTI) { 2960 if (netdev->flags & IFF_ALLMULTI) {
2581 fctrl |= IXGBE_FCTRL_MPE; 2961 fctrl |= IXGBE_FCTRL_MPE;
@@ -2583,22 +2963,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2583 } else { 2963 } else {
2584 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2964 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2585 } 2965 }
2586 vlnctrl |= IXGBE_VLNCTRL_VFE; 2966 ixgbe_vlan_filter_enable(adapter);
2587 hw->addr_ctrl.user_set_promisc = 0; 2967 hw->addr_ctrl.user_set_promisc = 0;
2588 } 2968 }
2589 2969
2590 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2970 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2591 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2592 2971
2593 /* reprogram secondary unicast list */ 2972 /* reprogram secondary unicast list */
2594 hw->mac.ops.update_uc_addr_list(hw, netdev); 2973 hw->mac.ops.update_uc_addr_list(hw, netdev);
2595 2974
2596 /* reprogram multicast list */ 2975 /* reprogram multicast list */
2597 addr_count = netdev_mc_count(netdev); 2976 hw->mac.ops.update_mc_addr_list(hw, netdev);
2598 if (addr_count) 2977
2599 addr_list = netdev->mc_list->dmi_addr;
2600 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2601 ixgbe_addr_list_itr);
2602 if (adapter->num_vfs) 2978 if (adapter->num_vfs)
2603 ixgbe_restore_vf_multicasts(adapter); 2979 ixgbe_restore_vf_multicasts(adapter);
2604} 2980}
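
With ixgbe_addr_list_itr() gone, update_mc_addr_list() receives the
net_device itself and the MAC layer walks the multicast list internally,
instead of being handed a raw dmi_addr pointer plus an iterator callback.
A standalone sketch of that shape change, using a hypothetical mc_node
list as a stand-in for the old struct dev_mc_list:

#include <stdio.h>

struct mc_node {                /* stand-in for struct dev_mc_list */
        unsigned char addr[6];
        struct mc_node *next;
};

/* old shape: the hardware layer pulls addresses through a callback */
typedef unsigned char *(*mc_itr)(struct mc_node **cursor);

static unsigned char *next_addr(struct mc_node **cursor)
{
        struct mc_node *n = *cursor;

        *cursor = n->next;
        return n->addr;
}

static void program_old(struct mc_node *list, int count, mc_itr itr)
{
        while (count--)
                printf("old: %02x...\n", itr(&list)[0]);
}

/* new shape: the hardware layer iterates the device's own list */
static void program_new(const struct mc_node *list)
{
        for (; list; list = list->next)
                printf("new: %02x...\n", list->addr[0]);
}

int main(void)
{
        struct mc_node b = { { 0x01, 0, 0x5e, 0, 0, 2 }, NULL };
        struct mc_node a = { { 0x01, 0, 0x5e, 0, 0, 1 }, &b };

        program_old(&a, 2, next_addr);
        program_new(&a);
        return 0;
}
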
@@ -2658,7 +3034,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2658static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 3034static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2659{ 3035{
2660 struct ixgbe_hw *hw = &adapter->hw; 3036 struct ixgbe_hw *hw = &adapter->hw;
2661 u32 txdctl, vlnctrl; 3037 u32 txdctl;
2662 int i, j; 3038 int i, j;
2663 3039
2664 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3040 ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2676,22 +3052,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2676 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3052 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2677 } 3053 }
2678 /* Enable VLAN tag insert/strip */ 3054 /* Enable VLAN tag insert/strip */
2679 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3055 ixgbe_vlan_filter_enable(adapter);
2680 if (hw->mac.type == ixgbe_mac_82598EB) { 3056
2681 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2682 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2683 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2684 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2685 vlnctrl |= IXGBE_VLNCTRL_VFE;
2686 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2687 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2688 for (i = 0; i < adapter->num_rx_queues; i++) {
2689 j = adapter->rx_ring[i]->reg_idx;
2690 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2691 vlnctrl |= IXGBE_RXDCTL_VME;
2692 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2693 }
2694 }
2695 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3057 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2696} 3058}
2697 3059
@@ -2924,8 +3286,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2924 for (i = 0; i < adapter->num_tx_queues; i++) { 3286 for (i = 0; i < adapter->num_tx_queues; i++) {
2925 j = adapter->tx_ring[i]->reg_idx; 3287 j = adapter->tx_ring[i]->reg_idx;
2926 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3288 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2927 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 3289 if (adapter->rx_itr_setting == 0) {
2928 txdctl |= (8 << 16); 3290 /* cannot set wthresh when itr==0 */
3291 txdctl &= ~0x007F0000;
3292 } else {
3293 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3294 txdctl |= (8 << 16);
3295 }
2929 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3296 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2930 } 3297 }
2931 3298
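
The TXDCTL tweak ties the transmit write-back threshold to interrupt
throttling: WTHRESH occupies bits 22:16 of TXDCTL (hence the 0x007F0000
mask and the 8 << 16), and per the comment the field must stay zero when
the ITR setting is zero. The field arithmetic, shown standalone:

#include <stdint.h>
#include <stdio.h>

#define TXDCTL_WTHRESH_MASK 0x007F0000u        /* bits 22:16 */
#define TXDCTL_WTHRESH(n)   ((uint32_t)(n) << 16)

static uint32_t set_wthresh(uint32_t txdctl, unsigned int itr_setting)
{
        if (itr_setting == 0)      /* ITR disabled: no write-back delay */
                txdctl &= ~TXDCTL_WTHRESH_MASK;
        else                       /* burst write-back, 8 descriptors */
                txdctl |= TXDCTL_WTHRESH(8);
        return txdctl;
}

int main(void)
{
        printf("itr=0: 0x%08x\n", set_wthresh(0xffffffffu, 0));
        printf("itr>0: 0x%08x\n", set_wthresh(0, 956));
        return 0;
}
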
@@ -2979,6 +3346,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2979 else 3346 else
2980 ixgbe_configure_msi_and_legacy(adapter); 3347 ixgbe_configure_msi_and_legacy(adapter);
2981 3348
3349 /* enable the optics */
3350 if (hw->phy.multispeed_fiber)
3351 hw->mac.ops.enable_tx_laser(hw);
3352
2982 clear_bit(__IXGBE_DOWN, &adapter->state); 3353 clear_bit(__IXGBE_DOWN, &adapter->state);
2983 ixgbe_napi_enable_all(adapter); 3354 ixgbe_napi_enable_all(adapter);
2984 3355
@@ -3054,6 +3425,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3054 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 3425 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3055 msleep(1); 3426 msleep(1);
3056 ixgbe_down(adapter); 3427 ixgbe_down(adapter);
3428 /*
3429 * If SR-IOV enabled then wait a bit before bringing the adapter
3430 * back up to give the VFs time to respond to the reset. The
3431 * two second wait is based upon the watchdog timer cycle in
3432 * the VF driver.
3433 */
3434 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3435 msleep(2000);
3057 ixgbe_up(adapter); 3436 ixgbe_up(adapter);
3058 clear_bit(__IXGBE_RESETTING, &adapter->state); 3437 clear_bit(__IXGBE_RESETTING, &adapter->state);
3059} 3438}
@@ -3116,9 +3495,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3116 3495
3117 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3496 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3118 if (rx_buffer_info->dma) { 3497 if (rx_buffer_info->dma) {
3119 pci_unmap_single(pdev, rx_buffer_info->dma, 3498 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3120 rx_ring->rx_buf_len, 3499 rx_ring->rx_buf_len,
3121 PCI_DMA_FROMDEVICE); 3500 DMA_FROM_DEVICE);
3122 rx_buffer_info->dma = 0; 3501 rx_buffer_info->dma = 0;
3123 } 3502 }
3124 if (rx_buffer_info->skb) { 3503 if (rx_buffer_info->skb) {
@@ -3126,10 +3505,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3126 rx_buffer_info->skb = NULL; 3505 rx_buffer_info->skb = NULL;
3127 do { 3506 do {
3128 struct sk_buff *this = skb; 3507 struct sk_buff *this = skb;
3129 if (IXGBE_RSC_CB(this)->dma) 3508 if (IXGBE_RSC_CB(this)->dma) {
3130 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, 3509 dma_unmap_single(&pdev->dev,
3510 IXGBE_RSC_CB(this)->dma,
3131 rx_ring->rx_buf_len, 3511 rx_ring->rx_buf_len,
3132 PCI_DMA_FROMDEVICE); 3512 DMA_FROM_DEVICE);
3513 IXGBE_RSC_CB(this)->dma = 0;
3514 }
3133 skb = skb->prev; 3515 skb = skb->prev;
3134 dev_kfree_skb(this); 3516 dev_kfree_skb(this);
3135 } while (skb); 3517 } while (skb);
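
Zeroing IXGBE_RSC_CB(this)->dma after the unmap is the substantive fix in
this hunk: an RSC chain links coalesced buffers through skb->prev, and a
stale handle left behind could be unmapped a second time by a later
cleanup pass. A standalone model of the walk-and-invalidate pattern, with
a plain uint64_t standing in for the DMA handle:

#include <stdint.h>
#include <stdio.h>

struct buf {
        uint64_t dma;      /* stand-in for a streaming DMA mapping */
        struct buf *prev;  /* chain link, like skb->prev in an RSC chain */
};

static void unmap(uint64_t dma)
{
        printf("unmap 0x%llx\n", (unsigned long long)dma);
}

/* release each mapping in the chain exactly once; zeroing the handle
 * turns any second pass over the same buffers into a no-op */
static void unmap_chain(struct buf *skb)
{
        do {
                struct buf *this = skb;

                if (this->dma) {
                        unmap(this->dma);
                        this->dma = 0;
                }
                skb = skb->prev;
        } while (skb);
}

int main(void)
{
        struct buf b = { 0x2000, NULL };
        struct buf a = { 0x1000, &b };

        unmap_chain(&a);
        unmap_chain(&a);   /* prints nothing: already invalidated */
        return 0;
}
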
@@ -3137,8 +3519,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3137 if (!rx_buffer_info->page) 3519 if (!rx_buffer_info->page)
3138 continue; 3520 continue;
3139 if (rx_buffer_info->page_dma) { 3521 if (rx_buffer_info->page_dma) {
3140 pci_unmap_page(pdev, rx_buffer_info->page_dma, 3522 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3141 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 3523 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3142 rx_buffer_info->page_dma = 0; 3524 rx_buffer_info->page_dma = 0;
3143 } 3525 }
3144 put_page(rx_buffer_info->page); 3526 put_page(rx_buffer_info->page);
@@ -3230,37 +3612,44 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3230 /* signal that we are down to the interrupt handler */ 3612 /* signal that we are down to the interrupt handler */
3231 set_bit(__IXGBE_DOWN, &adapter->state); 3613 set_bit(__IXGBE_DOWN, &adapter->state);
3232 3614
3615 /* power down the optics */
3616 if (hw->phy.multispeed_fiber)
3617 hw->mac.ops.disable_tx_laser(hw);
3618
3233 /* disable receive for all VFs and wait one second */ 3619 /* disable receive for all VFs and wait one second */
3234 if (adapter->num_vfs) { 3620 if (adapter->num_vfs) {
3235 for (i = 0 ; i < adapter->num_vfs; i++)
3236 adapter->vfinfo[i].clear_to_send = 0;
3237
3238 /* ping all the active vfs to let them know we are going down */ 3621 /* ping all the active vfs to let them know we are going down */
3239 ixgbe_ping_all_vfs(adapter); 3622 ixgbe_ping_all_vfs(adapter);
3623
3240 /* Disable all VFTE/VFRE TX/RX */ 3624 /* Disable all VFTE/VFRE TX/RX */
3241 ixgbe_disable_tx_rx(adapter); 3625 ixgbe_disable_tx_rx(adapter);
3626
3627 /* Mark all the VFs as inactive */
3628 for (i = 0 ; i < adapter->num_vfs; i++)
3629 adapter->vfinfo[i].clear_to_send = 0;
3242 } 3630 }
3243 3631
3244 /* disable receives */ 3632 /* disable receives */
3245 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3633 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3246 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3634 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3247 3635
3248 netif_tx_disable(netdev);
3249
3250 IXGBE_WRITE_FLUSH(hw); 3636 IXGBE_WRITE_FLUSH(hw);
3251 msleep(10); 3637 msleep(10);
3252 3638
3253 netif_tx_stop_all_queues(netdev); 3639 netif_tx_stop_all_queues(netdev);
3254 3640
3255 ixgbe_irq_disable(adapter);
3256
3257 ixgbe_napi_disable_all(adapter);
3258
3259 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3641 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3260 del_timer_sync(&adapter->sfp_timer); 3642 del_timer_sync(&adapter->sfp_timer);
3261 del_timer_sync(&adapter->watchdog_timer); 3643 del_timer_sync(&adapter->watchdog_timer);
3262 cancel_work_sync(&adapter->watchdog_task); 3644 cancel_work_sync(&adapter->watchdog_task);
3263 3645
3646 netif_carrier_off(netdev);
3647 netif_tx_disable(netdev);
3648
3649 ixgbe_irq_disable(adapter);
3650
3651 ixgbe_napi_disable_all(adapter);
3652
3264 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3653 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3265 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3654 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3266 cancel_work_sync(&adapter->fdir_reinit_task); 3655 cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3278,8 +3667,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3278 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3667 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3279 ~IXGBE_DMATXCTL_TE)); 3668 ~IXGBE_DMATXCTL_TE));
3280 3669
3281 netif_carrier_off(netdev);
3282
3283 /* clear n-tuple filters that are cached */ 3670 /* clear n-tuple filters that are cached */
3284 ethtool_ntuple_flush(netdev); 3671 ethtool_ntuple_flush(netdev);
3285 3672
@@ -3356,6 +3743,8 @@ static void ixgbe_reset_task(struct work_struct *work)
3356 3743
3357 adapter->tx_timeout_count++; 3744 adapter->tx_timeout_count++;
3358 3745
3746 ixgbe_dump(adapter);
3747 netdev_err(adapter->netdev, "Reset adapter\n");
3359 ixgbe_reinit_locked(adapter); 3748 ixgbe_reinit_locked(adapter);
3360} 3749}
3361 3750
@@ -3456,12 +3845,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3456 adapter->num_tx_queues = 1; 3845 adapter->num_tx_queues = 1;
3457#ifdef CONFIG_IXGBE_DCB 3846#ifdef CONFIG_IXGBE_DCB
3458 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3847 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3459 DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); 3848 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3460 ixgbe_set_dcb_queues(adapter); 3849 ixgbe_set_dcb_queues(adapter);
3461 } 3850 }
3462#endif 3851#endif
3463 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3852 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3464 DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); 3853 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3465 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3854 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3466 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3855 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3467 ixgbe_set_fdir_queues(adapter); 3856 ixgbe_set_fdir_queues(adapter);
@@ -4358,8 +4747,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4358 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4747 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4359 tx_ring->size = ALIGN(tx_ring->size, 4096); 4748 tx_ring->size = ALIGN(tx_ring->size, 4096);
4360 4749
4361 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 4750 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4362 &tx_ring->dma); 4751 &tx_ring->dma, GFP_KERNEL);
4363 if (!tx_ring->desc) 4752 if (!tx_ring->desc)
4364 goto err; 4753 goto err;
4365 4754
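
Beyond swapping pci_alloc_consistent() for the generic
dma_alloc_coherent() (which also makes the GFP_KERNEL allocation context
explicit), the ring setup math is untouched: count descriptors of 16
bytes each for the advanced descriptor format, rounded up to a 4 KiB
boundary. A userspace model with posix_memalign() standing in for the
coherent DMA allocation:

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_ALIGN 4096
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

union adv_tx_desc { uint64_t qw[2]; };  /* 16-byte descriptor stand-in */

int main(void)
{
        size_t count = 512;   /* a typical default ring depth */
        size_t size = ALIGN_UP(count * sizeof(union adv_tx_desc),
                               RING_ALIGN);
        void *desc;

        /* dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL)
         * stand-in */
        if (posix_memalign(&desc, RING_ALIGN, size))
                return 1;

        printf("%zu descriptors -> %zu bytes at %p\n", count, size, desc);
        free(desc);
        return 0;
}
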
@@ -4429,7 +4818,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4429 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 4818 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4430 rx_ring->size = ALIGN(rx_ring->size, 4096); 4819 rx_ring->size = ALIGN(rx_ring->size, 4096);
4431 4820
4432 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); 4821 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
4822 &rx_ring->dma, GFP_KERNEL);
4433 4823
4434 if (!rx_ring->desc) { 4824 if (!rx_ring->desc) {
4435 DPRINTK(PROBE, ERR, 4825 DPRINTK(PROBE, ERR,
@@ -4490,7 +4880,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4490 vfree(tx_ring->tx_buffer_info); 4880 vfree(tx_ring->tx_buffer_info);
4491 tx_ring->tx_buffer_info = NULL; 4881 tx_ring->tx_buffer_info = NULL;
4492 4882
4493 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 4883 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
4884 tx_ring->dma);
4494 4885
4495 tx_ring->desc = NULL; 4886 tx_ring->desc = NULL;
4496} 4887}
@@ -4527,7 +4918,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4527 vfree(rx_ring->rx_buffer_info); 4918 vfree(rx_ring->rx_buffer_info);
4528 rx_ring->rx_buffer_info = NULL; 4919 rx_ring->rx_buffer_info = NULL;
4529 4920
4530 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 4921 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
4922 rx_ring->dma);
4531 4923
4532 rx_ring->desc = NULL; 4924 rx_ring->desc = NULL;
4533} 4925}
@@ -5018,6 +5410,7 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5018 autoneg = hw->phy.autoneg_advertised; 5410 autoneg = hw->phy.autoneg_advertised;
5019 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 5411 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5020 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 5412 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
5413 hw->mac.autotry_restart = false;
5021 if (hw->mac.ops.setup_link) 5414 if (hw->mac.ops.setup_link)
5022 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 5415 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5023 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 5416 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5076,7 +5469,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5076 &(adapter->tx_ring[i]->reinit_state)); 5469 &(adapter->tx_ring[i]->reinit_state));
5077 } else { 5470 } else {
5078 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5471 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
5079 "ignored adding FDIR ATR filters \n"); 5472 "ignored adding FDIR ATR filters\n");
5080 } 5473 }
5081 /* Done FDIR Re-initialization, enable transmits */ 5474 /* Done FDIR Re-initialization, enable transmits */
5082 netif_tx_start_all_queues(adapter->netdev); 5475 netif_tx_start_all_queues(adapter->netdev);
@@ -5396,10 +5789,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5396 5789
5397 tx_buffer_info->length = size; 5790 tx_buffer_info->length = size;
5398 tx_buffer_info->mapped_as_page = false; 5791 tx_buffer_info->mapped_as_page = false;
5399 tx_buffer_info->dma = pci_map_single(pdev, 5792 tx_buffer_info->dma = dma_map_single(&pdev->dev,
5400 skb->data + offset, 5793 skb->data + offset,
5401 size, PCI_DMA_TODEVICE); 5794 size, DMA_TO_DEVICE);
5402 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5795 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5403 goto dma_error; 5796 goto dma_error;
5404 tx_buffer_info->time_stamp = jiffies; 5797 tx_buffer_info->time_stamp = jiffies;
5405 tx_buffer_info->next_to_watch = i; 5798 tx_buffer_info->next_to_watch = i;
@@ -5432,12 +5825,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5432 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5825 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5433 5826
5434 tx_buffer_info->length = size; 5827 tx_buffer_info->length = size;
5435 tx_buffer_info->dma = pci_map_page(adapter->pdev, 5828 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
5436 frag->page, 5829 frag->page,
5437 offset, size, 5830 offset, size,
5438 PCI_DMA_TODEVICE); 5831 DMA_TO_DEVICE);
5439 tx_buffer_info->mapped_as_page = true; 5832 tx_buffer_info->mapped_as_page = true;
5440 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5833 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5441 goto dma_error; 5834 goto dma_error;
5442 tx_buffer_info->time_stamp = jiffies; 5835 tx_buffer_info->time_stamp = jiffies;
5443 tx_buffer_info->next_to_watch = i; 5836 tx_buffer_info->next_to_watch = i;
@@ -5633,7 +6026,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5633 6026
5634#ifdef IXGBE_FCOE 6027#ifdef IXGBE_FCOE
5635 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 6028 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5636 (skb->protocol == htons(ETH_P_FCOE))) { 6029 ((skb->protocol == htons(ETH_P_FCOE)) ||
6030 (skb->protocol == htons(ETH_P_FIP)))) {
5637 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6031 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
5638 txq += adapter->ring_feature[RING_F_FCOE].mask; 6032 txq += adapter->ring_feature[RING_F_FCOE].mask;
5639 return txq; 6033 return txq;
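
Matching ETH_P_FIP here keeps FCoE Initialization Protocol frames
(discovery, login) on the same rings as FCoE data, so both are handled by
queues the offload path owns. The selection itself is power-of-two
masking plus a base offset, with ring_feature[RING_F_FCOE].indices and
.mask supplying the range size and its starting ring. Standalone:

#include <stdio.h>

/* fold txq into the FCoE range (indices must be a power of two) and
 * rebase it at the first FCoE ring */
static unsigned int fcoe_select_queue(unsigned int txq,
                                      unsigned int indices,
                                      unsigned int base)
{
        txq &= (indices - 1);
        return txq + base;
}

int main(void)
{
        unsigned int txq;

        /* e.g. 8 FCoE rings starting at ring 64 */
        for (txq = 0; txq < 20; txq += 7)
                printf("txq %2u -> %u\n", txq,
                       fcoe_select_queue(txq, 8, 64));
        return 0;
}
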
@@ -5680,18 +6074,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5680 6074
5681 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6075 tx_ring = adapter->tx_ring[skb->queue_mapping];
5682 6076
5683 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5684 (skb->protocol == htons(ETH_P_FCOE))) {
5685 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5686#ifdef IXGBE_FCOE 6077#ifdef IXGBE_FCOE
6078 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
5687#ifdef CONFIG_IXGBE_DCB 6079#ifdef CONFIG_IXGBE_DCB
5688 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6080 /* for FCoE with DCB, we force the priority to what
5689 << IXGBE_TX_FLAGS_VLAN_SHIFT); 6081 * was specified by the switch */
5690 tx_flags |= ((adapter->fcoe.up << 13) 6082 if ((skb->protocol == htons(ETH_P_FCOE)) ||
5691 << IXGBE_TX_FLAGS_VLAN_SHIFT); 6083 (skb->protocol == htons(ETH_P_FIP))) {
5692#endif 6084 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6085 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6086 tx_flags |= ((adapter->fcoe.up << 13)
6087 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6088 }
5693#endif 6089#endif
6090 /* flag for FCoE offloads */
6091 if (skb->protocol == htons(ETH_P_FCOE))
6092 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5694 } 6093 }
6094#endif
6095
5695 /* four things can cause us to need a context descriptor */ 6096 /* four things can cause us to need a context descriptor */
5696 if (skb_is_gso(skb) || 6097 if (skb_is_gso(skb) ||
5697 (skb->ip_summed == CHECKSUM_PARTIAL) || 6098 (skb->ip_summed == CHECKSUM_PARTIAL) ||
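
For FCoE under DCB, the transmit path now overrides whatever 802.1p
priority the stack chose with the user priority assigned to the FCoE
traffic class (adapter->fcoe.up), and does so for FIP frames too. A model
of the bit surgery, assuming the layout the masks imply (VLAN tag in the
upper 16 bits of tx_flags, PCP in bits 15:13 of the tag, so 0x0000e000 as
the in-tag priority mask):

#include <stdint.h>
#include <stdio.h>

#define TX_FLAGS_VLAN_SHIFT     16
#define TX_FLAGS_VLAN_PRIO_MASK 0x0000e000u  /* PCP bits within the tag */

static uint32_t force_fcoe_priority(uint32_t tx_flags, unsigned int up)
{
        /* drop the stack-supplied 802.1p priority ... */
        tx_flags &= ~(TX_FLAGS_VLAN_PRIO_MASK << TX_FLAGS_VLAN_SHIFT);
        /* ... and insert the DCB priority for the FCoE traffic class */
        tx_flags |= ((uint32_t)up << 13) << TX_FLAGS_VLAN_SHIFT;
        return tx_flags;
}

int main(void)
{
        uint32_t flags = force_fcoe_priority(0xffff0000u, 3);

        printf("tx_flags=0x%08x PCP=%u\n", flags,
               (flags >> (13 + TX_FLAGS_VLAN_SHIFT)) & 0x7);
        return 0;
}
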
@@ -5910,6 +6311,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5910 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 6311 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5911 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 6312 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
5912 .ndo_do_ioctl = ixgbe_ioctl, 6313 .ndo_do_ioctl = ixgbe_ioctl,
6314 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6315 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6316 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6317 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
5913#ifdef CONFIG_NET_POLL_CONTROLLER 6318#ifdef CONFIG_NET_POLL_CONTROLLER
5914 .ndo_poll_controller = ixgbe_netpoll, 6319 .ndo_poll_controller = ixgbe_netpoll,
5915#endif 6320#endif
@@ -6007,13 +6412,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6007 if (err) 6412 if (err)
6008 return err; 6413 return err;
6009 6414
6010 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 6415 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6011 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 6416 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6012 pci_using_dac = 1; 6417 pci_using_dac = 1;
6013 } else { 6418 } else {
6014 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6419 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6015 if (err) { 6420 if (err) {
6016 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 6421 err = dma_set_coherent_mask(&pdev->dev,
6422 DMA_BIT_MASK(32));
6017 if (err) { 6423 if (err) {
6018 dev_err(&pdev->dev, "No usable DMA " 6424 dev_err(&pdev->dev, "No usable DMA "
6019 "configuration, aborting\n"); 6425 "configuration, aborting\n");
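
Probe keeps the classic try-64-then-32 DMA mask negotiation, only now
through the bus-agnostic dma_set_mask()/dma_set_coherent_mask() instead
of the PCI wrappers. The control flow, sketched with a stub standing in
for the real capability query:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* stand-in for dma_set_mask(): pretend the platform is 32-bit only */
static bool try_set_mask(uint64_t mask)
{
        return mask <= DMA_BIT_MASK(32);
}

int main(void)
{
        bool using_dac;

        if (try_set_mask(DMA_BIT_MASK(64))) {
                using_dac = true;        /* 64-bit addressing available */
        } else if (try_set_mask(DMA_BIT_MASK(32))) {
                using_dac = false;       /* fall back to 32-bit DMA */
        } else {
                fprintf(stderr, "No usable DMA configuration\n");
                return 1;
        }
        printf("using_dac=%d\n", using_dac);
        return 0;
}
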
@@ -6046,7 +6452,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6046 indices += min_t(unsigned int, num_possible_cpus(), 6452 indices += min_t(unsigned int, num_possible_cpus(),
6047 IXGBE_MAX_FCOE_INDICES); 6453 IXGBE_MAX_FCOE_INDICES);
6048#endif 6454#endif
6049 indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
6050 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); 6455 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
6051 if (!netdev) { 6456 if (!netdev) {
6052 err = -ENOMEM; 6457 err = -ENOMEM;
@@ -6230,6 +6635,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6230 goto err_eeprom; 6635 goto err_eeprom;
6231 } 6636 }
6232 6637
6638 /* power down the optics */
6639 if (hw->phy.multispeed_fiber)
6640 hw->mac.ops.disable_tx_laser(hw);
6641
6233 init_timer(&adapter->watchdog_timer); 6642 init_timer(&adapter->watchdog_timer);
6234 adapter->watchdog_timer.function = &ixgbe_watchdog; 6643 adapter->watchdog_timer.function = &ixgbe_watchdog;
6235 adapter->watchdog_timer.data = (unsigned long)adapter; 6644 adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6245,9 +6654,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6245 case IXGBE_DEV_ID_82599_KX4: 6654 case IXGBE_DEV_ID_82599_KX4:
6246 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 6655 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6247 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 6656 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6248 /* Enable ACPI wakeup in GRC */
6249 IXGBE_WRITE_REG(hw, IXGBE_GRC,
6250 (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
6251 break; 6657 break;
6252 default: 6658 default:
6253 adapter->wol = 0; 6659 adapter->wol = 0;
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..d6d5b843d625 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
475 msleep(edata); 475 msleep(edata);
476 break; 476 break;
477 case IXGBE_DATA_NL: 477 case IXGBE_DATA_NL:
478 hw_dbg(hw, "DATA: \n"); 478 hw_dbg(hw, "DATA:\n");
479 data_offset++; 479 data_offset++;
480 hw->eeprom.ops.read(hw, data_offset++, 480 hw->eeprom.ops.read(hw, data_offset++,
481 &phy_offset); 481 &phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
491 break; 491 break;
492 case IXGBE_CONTROL_NL: 492 case IXGBE_CONTROL_NL:
493 data_offset++; 493 data_offset++;
494 hw_dbg(hw, "CONTROL: \n"); 494 hw_dbg(hw, "CONTROL:\n");
495 if (edata == IXGBE_CONTROL_EOL_NL) { 495 if (edata == IXGBE_CONTROL_EOL_NL) {
496 hw_dbg(hw, "EOL\n"); 496 hw_dbg(hw, "EOL\n");
497 end_data = true; 497 end_data = true;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf) 48 int entries, u16 *hash_list, u32 vf)
49{ 49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 struct ixgbe_hw *hw = &adapter->hw;
51 int i; 52 int i;
53 u32 vector_bit;
54 u32 vector_reg;
55 u32 mta_reg;
52 56
53 /* only so many hash values supported */ 57 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 58 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 vfinfo->vf_mc_hashes[i] = hash_list[i]; 72 vfinfo->vf_mc_hashes[i] = hash_list[i];
69 } 73 }
70 74
71 /* Flush and reset the mta with the new values */ 75 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
72 ixgbe_set_rx_mode(adapter->netdev); 76 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
77 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
78 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
79 mta_reg |= (1 << vector_bit);
80 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
81 }
73 82
74 return 0; 83 return 0;
75} 84}
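
Programming the MTA directly means a VF's multicast hashes are merged
into the table without rebuilding the PF's whole receive filter state
via ixgbe_set_rx_mode(). Each stored 12-bit hash selects one bit of the
4096-bit multicast table array: the upper seven bits pick one of 128
32-bit MTA registers, the low five bits the bit within it. Standalone:

#include <stdint.h>
#include <stdio.h>

static uint32_t mta[128];  /* stand-in for the 128 MTA registers */

static void mta_set(uint16_t hash)
{
        uint32_t vector_reg = (hash >> 5) & 0x7F; /* which register  */
        uint32_t vector_bit = hash & 0x1F;        /* which bit in it */

        mta[vector_reg] |= 1u << vector_bit;
}

int main(void)
{
        mta_set(0x0abc);  /* -> register 0x55, bit 0x1c */
        printf("MTA[0x55]=0x%08x\n", mta[0x55]);
        return 0;
}

The helper only ever sets bits; clearing is left to the full table
rewrites done by the regular filter paths.
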
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
98 107
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 108int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{ 109{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113} 111}
114 112
115 113
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) 114void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
117{ 115{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE | 117 vmolr |= (IXGBE_VMOLR_ROMPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE | 118 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM); 119 IXGBE_VMOLR_BAM);
120 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE;
122 else
123 vmolr &= ~IXGBE_VMOLR_AUPE;
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); 124 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124} 125}
125 126
127static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
128{
129 struct ixgbe_hw *hw = &adapter->hw;
130
131 if (vid)
132 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
133 (vid | IXGBE_VMVIR_VLANA_DEFAULT));
134 else
135 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
136}
137
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{ 139{
128 struct ixgbe_hw *hw = &adapter->hw; 140 struct ixgbe_hw *hw = &adapter->hw;
129 141
130 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf); 143 if (adapter->vfinfo[vf].pf_vlan) {
132 144 ixgbe_set_vf_vlan(adapter, true,
145 adapter->vfinfo[vf].pf_vlan, vf);
146 ixgbe_set_vmvir(adapter,
147 (adapter->vfinfo[vf].pf_vlan |
148 (adapter->vfinfo[vf].pf_qos <<
149 VLAN_PRIO_SHIFT)), vf);
150 ixgbe_set_vmolr(hw, vf, false);
151 } else {
152 ixgbe_set_vmvir(adapter, 0, vf);
153 ixgbe_set_vmolr(hw, vf, true);
154 }
133 155
134 /* reset multicast table array for vf */ 156 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0; 157 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
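
ixgbe_set_vmvir() packs the administratively assigned tag into the
per-VF VMVIR register: the 12-bit VID with the 3-bit QoS value shifted
into the PCP position (VLAN_PRIO_SHIFT is 13), plus VLANA_DEFAULT so the
hardware always inserts that tag on the VF's transmits. Building the
register value standalone:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT           13
#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000u /* always insert the tag */

static uint32_t vmvir_value(uint16_t vlan, uint8_t qos)
{
        uint32_t vid = vlan | ((uint32_t)qos << VLAN_PRIO_SHIFT);

        return vid ? (vid | IXGBE_VMVIR_VLANA_DEFAULT) : 0;
}

int main(void)
{
        /* VLAN 100, priority 5 -> tag 0xa064 plus VLANA_DEFAULT */
        printf("VMVIR=0x%08x\n", vmvir_value(100, 5));
        printf("VMVIR=0x%08x (port VLAN cleared)\n", vmvir_value(0, 0));
        return 0;
}

With a port VLAN in force, ixgbe_vf_reset_event() also clears AUPE in
VMOLR, so the VF no longer accepts untagged frames once all of its
traffic is tagged by the port VLAN.
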
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
263 case IXGBE_VF_SET_MAC_ADDR: 285 case IXGBE_VF_SET_MAC_ADDR:
264 { 286 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1])); 287 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac)) 288 if (is_valid_ether_addr(new_mac) &&
289 !adapter->vfinfo[vf].pf_set_mac)
267 ixgbe_set_vf_mac(adapter, vf, new_mac); 290 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else 291 else
269 retval = -1; 292 ixgbe_set_vf_mac(adapter,
293 vf, adapter->vfinfo[vf].vf_mac_addresses);
270 } 294 }
271 break; 295 break;
272 case IXGBE_VF_SET_MULTICAST: 296 case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
360 } 384 }
361} 385}
362 386
387int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
388{
389 struct ixgbe_adapter *adapter = netdev_priv(netdev);
390 if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
391 return -EINVAL;
392 adapter->vfinfo[vf].pf_set_mac = true;
393 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
394 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
395 " change effective.\n");
396 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
397 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
398 " but the PF device is not up.\n");
399 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
400 " attempting to use the VF device.\n");
401 }
402 return ixgbe_set_vf_mac(adapter, vf, mac);
403}
404
405int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
406{
407 int err = 0;
408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
409
410 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
411 return -EINVAL;
412 if (vlan || qos) {
413 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
414 if (err)
415 goto out;
416 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
417 ixgbe_set_vmolr(&adapter->hw, vf, false);
418 adapter->vfinfo[vf].pf_vlan = vlan;
419 adapter->vfinfo[vf].pf_qos = qos;
420 dev_info(&adapter->pdev->dev,
421 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
422 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
423 dev_warn(&adapter->pdev->dev,
424 "The VF VLAN has been set,"
425 " but the PF device is not up.\n");
426 dev_warn(&adapter->pdev->dev,
427 "Bring the PF device up before"
428 " attempting to use the VF device.\n");
429 }
430 } else {
431 err = ixgbe_set_vf_vlan(adapter, false,
432 adapter->vfinfo[vf].pf_vlan, vf);
433 ixgbe_set_vmvir(adapter, vlan, vf);
434 ixgbe_set_vmolr(&adapter->hw, vf, true);
435 adapter->vfinfo[vf].pf_vlan = 0;
436 adapter->vfinfo[vf].pf_qos = 0;
437 }
438out:
439 return err;
440}
441
442int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
443{
444 return -EOPNOTSUPP;
445}
446
447int ixgbe_ndo_get_vf_config(struct net_device *netdev,
448 int vf, struct ifla_vf_info *ivi)
449{
450 struct ixgbe_adapter *adapter = netdev_priv(netdev);
451 if (vf >= adapter->num_vfs)
452 return -EINVAL;
453 ivi->vf = vf;
454 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
455 ivi->tx_rate = 0;
456 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
457 ivi->qos = adapter->vfinfo[vf].pf_qos;
458 return 0;
459}
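
These four hooks are the driver side of the rtnetlink IFLA_VF_*
interface: with them in place, iproute2's "ip link set <dev> vf <n> mac
<addr>" and "ip link set <dev> vf <n> vlan <vid> qos <q>" commands reach
ixgbe, "ip link show <dev>" reports per-VF state through
ndo_get_vf_config, and ndo_set_vf_bw stays an -EOPNOTSUPP stub until
transmit rate limiting is implemented.
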
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf); 32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); 34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); 35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); 36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); 37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter); 44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
46int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
47 u8 qos);
48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
49int ixgbe_ndo_get_vf_config(struct net_device *netdev,
50 int vf, struct ifla_vf_info *ivi);
45 51
46#endif /* _IXGBE_SRIOV_H_ */ 52#endif /* _IXGBE_SRIOV_H_ */
47 53
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2be907466593..4277cbbb8126 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -219,6 +219,7 @@
219#define IXGBE_MTQC 0x08120 219#define IXGBE_MTQC 0x08120
220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
222#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
222#define IXGBE_VT_CTL 0x051B0 223#define IXGBE_VT_CTL 0x051B0
223#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 224#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
224#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 225#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -1298,6 +1299,7 @@
1298#define IXGBE_ETQF_FILTER_BCN 1 1299#define IXGBE_ETQF_FILTER_BCN 1
1299#define IXGBE_ETQF_FILTER_FCOE 2 1300#define IXGBE_ETQF_FILTER_FCOE 2
1300#define IXGBE_ETQF_FILTER_1588 3 1301#define IXGBE_ETQF_FILTER_1588 3
1302#define IXGBE_ETQF_FILTER_FIP 4
1301/* VLAN Control Bit Masks */ 1303/* VLAN Control Bit Masks */
1302#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ 1304#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
1303#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ 1305#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -1310,6 +1312,10 @@
1310#define IXGBE_VLVF_ENTRIES 64 1312#define IXGBE_VLVF_ENTRIES 64
1311#define IXGBE_VLVF_VLANID_MASK 0x00000FFF 1313#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1312 1314
1315/* Per VF Port VLAN insertion rules */
1316#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
1317#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
1318
1313#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1319#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1314 1320
1315/* STATUS Bit Masks */ 1321/* STATUS Bit Masks */
@@ -2397,6 +2403,9 @@ struct ixgbe_mac_operations {
2397 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2403 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2398 2404
2399 /* Link */ 2405 /* Link */
2406 void (*disable_tx_laser)(struct ixgbe_hw *);
2407 void (*enable_tx_laser)(struct ixgbe_hw *);
2408 void (*flap_tx_laser)(struct ixgbe_hw *);
2400 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); 2409 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2401 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2410 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
2402 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 2411 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2415,8 +2424,7 @@ struct ixgbe_mac_operations {
2415 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2424 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2416 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2425 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2417 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); 2426 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2418 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2427 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2419 ixgbe_mc_addr_itr);
2420 s32 (*enable_mc)(struct ixgbe_hw *); 2428 s32 (*enable_mc)(struct ixgbe_hw *);
2421 s32 (*disable_mc)(struct ixgbe_hw *); 2429 s32 (*disable_mc)(struct ixgbe_hw *);
2422 s32 (*clear_vfta)(struct ixgbe_hw *); 2430 s32 (*clear_vfta)(struct ixgbe_hw *);