diff options
author | David S. Miller <davem@davemloft.net> | 2012-07-22 15:23:18 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-07-22 15:23:18 -0400 |
commit | fd183f6a73fc0ff3e32c636dbb539e35d4c530c9 (patch) | |
tree | 64c63653710ddbc15e81d81c53d47a3928953b51 | |
parent | 5dc7c77967a64a1c2cb1a45f45f580fa18927a4c (diff) | |
parent | 76886596921dd0e058f7f0a16de6151629390d15 (diff) |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:
--------------------
This series contains updates to ixgbe and ixgbevf.
...
Akeem G. Abodunrin (1):
igb: reset PHY in the link_up process to recover PHY setting after
power down.
Alexander Duyck (8):
ixgbe: Drop probe_vf and merge functionality into ixgbe_enable_sriov
ixgbe: Change how we check for pre-existing and assigned VFs
ixgbevf: Add lock around mailbox ops to prevent simultaneous access
ixgbevf: Add support for PCI error handling
ixgbe: Fix handling of FDIR_HASH flag
ixgbe: Reduce Rx header size to what is actually used
ixgbe: Use num_tcs.pg_tcs as upper limit for TC when checking based
on UP
ixgbe: Use 1TC DCB instead of disabling DCB for MSI and legacy
interrupts
Don Skidmore (1):
ixgbe: add support for new 82599 device
Greg Rose (1):
ixgbevf: Fix namespace issue with ixgbe_write_eitr
John Fastabend (2):
ixgbe: fix RAR entry counting for generic and fdb_add()
ixgbe: remove extra unused queues in DCB + FCoE case
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/intel/igb/igb_main.c | 3 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe.h | 16 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 12 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 41 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 140 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 151 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 3 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 164 |
10 files changed, 323 insertions, 209 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8adeca9787ca..1050411e7ca3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -1500,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter) | |||
1500 | **/ | 1500 | **/ |
1501 | void igb_power_up_link(struct igb_adapter *adapter) | 1501 | void igb_power_up_link(struct igb_adapter *adapter) |
1502 | { | 1502 | { |
1503 | igb_reset_phy(&adapter->hw); | ||
1504 | |||
1503 | if (adapter->hw.phy.media_type == e1000_media_type_copper) | 1505 | if (adapter->hw.phy.media_type == e1000_media_type_copper) |
1504 | igb_power_up_phy_copper(&adapter->hw); | 1506 | igb_power_up_phy_copper(&adapter->hw); |
1505 | else | 1507 | else |
1506 | igb_power_up_serdes_link_82575(&adapter->hw); | 1508 | igb_power_up_serdes_link_82575(&adapter->hw); |
1507 | igb_reset_phy(&adapter->hw); | ||
1508 | } | 1509 | } |
1509 | 1510 | ||
1510 | /** | 1511 | /** |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5a286adc65c0..b9623e9ea895 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h | |||
@@ -77,17 +77,18 @@ | |||
77 | #define IXGBE_MAX_FCPAUSE 0xFFFF | 77 | #define IXGBE_MAX_FCPAUSE 0xFFFF |
78 | 78 | ||
79 | /* Supported Rx Buffer Sizes */ | 79 | /* Supported Rx Buffer Sizes */ |
80 | #define IXGBE_RXBUFFER_512 512 /* Used for packet split */ | 80 | #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ |
81 | #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ | 81 | #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we | 84 | * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we |
85 | * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, | 85 | * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, |
86 | * this adds up to 512 bytes of extra data meaning the smallest allocation | 86 | * this adds up to 448 bytes of extra data. |
87 | * we could have is 1K. | 87 | * |
88 | * i.e. RXBUFFER_512 --> size-1024 slab | 88 | * Since netdev_alloc_skb now allocates a page fragment we can use a value |
89 | * of 256 and the resultant skb will have a truesize of 960 or less. | ||
89 | */ | 90 | */ |
90 | #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 | 91 | #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 |
91 | 92 | ||
92 | #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) | 93 | #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) |
93 | 94 | ||
@@ -130,7 +131,6 @@ struct vf_data_storage { | |||
130 | u16 tx_rate; | 131 | u16 tx_rate; |
131 | u16 vlan_count; | 132 | u16 vlan_count; |
132 | u8 spoofchk_enabled; | 133 | u8 spoofchk_enabled; |
133 | struct pci_dev *vfdev; | ||
134 | }; | 134 | }; |
135 | 135 | ||
136 | struct vf_macvlans { | 136 | struct vf_macvlans { |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 5442b359141e..9bc17c0cb972 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | |||
@@ -232,18 +232,22 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) | |||
232 | { | 232 | { |
233 | struct tc_configuration *tc_config = &cfg->tc_config[0]; | 233 | struct tc_configuration *tc_config = &cfg->tc_config[0]; |
234 | u8 prio_mask = 1 << up; | 234 | u8 prio_mask = 1 << up; |
235 | u8 tc; | 235 | u8 tc = cfg->num_tcs.pg_tcs; |
236 | |||
237 | /* If tc is 0 then DCB is likely not enabled or supported */ | ||
238 | if (!tc) | ||
239 | goto out; | ||
236 | 240 | ||
237 | /* | 241 | /* |
238 | * Test for TCs 7 through 1 and report the first match we find. If | 242 | * Test from maximum TC to 1 and report the first match we find. If |
239 | * we find no match we can assume that the TC is 0 since the TC must | 243 | * we find no match we can assume that the TC is 0 since the TC must |
240 | * be set for all user priorities | 244 | * be set for all user priorities |
241 | */ | 245 | */ |
242 | for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) { | 246 | for (tc--; tc; tc--) { |
243 | if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) | 247 | if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) |
244 | break; | 248 | break; |
245 | } | 249 | } |
246 | 250 | out: | |
247 | return tc; | 251 | return tc; |
248 | } | 252 | } |
249 | 253 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 38d1b65777ad..17ecbcedd548 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | |||
@@ -370,6 +370,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) | |||
370 | adapter->ring_feature[RING_F_RSS].indices = 1; | 370 | adapter->ring_feature[RING_F_RSS].indices = 1; |
371 | adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; | 371 | adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; |
372 | 372 | ||
373 | /* disable ATR as it is not supported when VMDq is enabled */ | ||
374 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
375 | |||
373 | adapter->num_rx_pools = vmdq_i; | 376 | adapter->num_rx_pools = vmdq_i; |
374 | adapter->num_rx_queues_per_pool = tcs; | 377 | adapter->num_rx_queues_per_pool = tcs; |
375 | 378 | ||
@@ -450,6 +453,9 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | |||
450 | f->indices = rss_i; | 453 | f->indices = rss_i; |
451 | f->mask = rss_m; | 454 | f->mask = rss_m; |
452 | 455 | ||
456 | /* disable ATR as it is not supported when multiple TCs are enabled */ | ||
457 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
458 | |||
453 | #ifdef IXGBE_FCOE | 459 | #ifdef IXGBE_FCOE |
454 | /* FCoE enabled queues require special configuration indexed | 460 | /* FCoE enabled queues require special configuration indexed |
455 | * by feature specific indices and offset. Here we map FCoE | 461 | * by feature specific indices and offset. Here we map FCoE |
@@ -606,16 +612,22 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
606 | f->indices = rss_i; | 612 | f->indices = rss_i; |
607 | f->mask = IXGBE_RSS_16Q_MASK; | 613 | f->mask = IXGBE_RSS_16Q_MASK; |
608 | 614 | ||
615 | /* disable ATR by default, it will be configured below */ | ||
616 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
617 | |||
609 | /* | 618 | /* |
610 | * Use Flow Director in addition to RSS to ensure the best | 619 | * Use Flow Director in addition to RSS to ensure the best |
611 | * distribution of flows across cores, even when an FDIR flow | 620 | * distribution of flows across cores, even when an FDIR flow |
612 | * isn't matched. | 621 | * isn't matched. |
613 | */ | 622 | */ |
614 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | 623 | if (rss_i > 1 && adapter->atr_sample_rate) { |
615 | f = &adapter->ring_feature[RING_F_FDIR]; | 624 | f = &adapter->ring_feature[RING_F_FDIR]; |
616 | 625 | ||
617 | f->indices = min_t(u16, num_online_cpus(), f->limit); | 626 | f->indices = min_t(u16, num_online_cpus(), f->limit); |
618 | rss_i = max_t(u16, rss_i, f->indices); | 627 | rss_i = max_t(u16, rss_i, f->indices); |
628 | |||
629 | if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | ||
630 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
619 | } | 631 | } |
620 | 632 | ||
621 | #ifdef IXGBE_FCOE | 633 | #ifdef IXGBE_FCOE |
@@ -1053,18 +1065,27 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
1053 | return; | 1065 | return; |
1054 | } | 1066 | } |
1055 | 1067 | ||
1056 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 1068 | /* disable DCB if number of TCs exceeds 1 */ |
1057 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | 1069 | if (netdev_get_num_tc(adapter->netdev) > 1) { |
1058 | e_err(probe, | 1070 | e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); |
1059 | "ATR is not supported while multiple " | 1071 | netdev_reset_tc(adapter->netdev); |
1060 | "queues are disabled. Disabling Flow Director\n"); | 1072 | |
1073 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | ||
1074 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; | ||
1075 | |||
1076 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
1077 | adapter->temp_dcb_cfg.pfc_mode_enable = false; | ||
1078 | adapter->dcb_cfg.pfc_mode_enable = false; | ||
1061 | } | 1079 | } |
1062 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 1080 | adapter->dcb_cfg.num_tcs.pg_tcs = 1; |
1063 | adapter->atr_sample_rate = 0; | 1081 | adapter->dcb_cfg.num_tcs.pfc_tcs = 1; |
1064 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 1082 | |
1065 | ixgbe_disable_sriov(adapter); | 1083 | /* disable SR-IOV */ |
1084 | ixgbe_disable_sriov(adapter); | ||
1066 | 1085 | ||
1086 | /* disable RSS */ | ||
1067 | adapter->ring_feature[RING_F_RSS].limit = 1; | 1087 | adapter->ring_feature[RING_F_RSS].limit = 1; |
1088 | |||
1068 | ixgbe_set_num_queues(adapter); | 1089 | ixgbe_set_num_queues(adapter); |
1069 | adapter->num_q_vectors = 1; | 1090 | adapter->num_q_vectors = 1; |
1070 | 1091 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f4e53c1a7338..3b6784cf134a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1517,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, | |||
1517 | * 60 bytes if the skb->len is less than 60 for skb_pad. | 1517 | * 60 bytes if the skb->len is less than 60 for skb_pad. |
1518 | */ | 1518 | */ |
1519 | pull_len = skb_frag_size(frag); | 1519 | pull_len = skb_frag_size(frag); |
1520 | if (pull_len > 256) | 1520 | if (pull_len > IXGBE_RX_HDR_SIZE) |
1521 | pull_len = ixgbe_get_headlen(va, pull_len); | 1521 | pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); |
1522 | 1522 | ||
1523 | /* align pull length to size of long to optimize memcpy performance */ | 1523 | /* align pull length to size of long to optimize memcpy performance */ |
1524 | skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); | 1524 | skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); |
@@ -2688,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2688 | 32; /* PTHRESH = 32 */ | 2688 | 32; /* PTHRESH = 32 */ |
2689 | 2689 | ||
2690 | /* reinitialize flowdirector state */ | 2690 | /* reinitialize flowdirector state */ |
2691 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && | 2691 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
2692 | adapter->atr_sample_rate) { | ||
2693 | ring->atr_sample_rate = adapter->atr_sample_rate; | 2692 | ring->atr_sample_rate = adapter->atr_sample_rate; |
2694 | ring->atr_count = 0; | 2693 | ring->atr_count = 0; |
2695 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); | 2694 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); |
@@ -3442,14 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) | |||
3442 | { | 3441 | { |
3443 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3442 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3444 | struct ixgbe_hw *hw = &adapter->hw; | 3443 | struct ixgbe_hw *hw = &adapter->hw; |
3445 | unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; | 3444 | unsigned int rar_entries = hw->mac.num_rar_entries - 1; |
3446 | int count = 0; | 3445 | int count = 0; |
3447 | 3446 | ||
3447 | /* In SR-IOV mode significantly less RAR entries are available */ | ||
3448 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
3449 | rar_entries = IXGBE_MAX_PF_MACVLANS - 1; | ||
3450 | |||
3448 | /* return ENOMEM indicating insufficient memory for addresses */ | 3451 | /* return ENOMEM indicating insufficient memory for addresses */ |
3449 | if (netdev_uc_count(netdev) > rar_entries) | 3452 | if (netdev_uc_count(netdev) > rar_entries) |
3450 | return -ENOMEM; | 3453 | return -ENOMEM; |
3451 | 3454 | ||
3452 | if (!netdev_uc_empty(netdev) && rar_entries) { | 3455 | if (!netdev_uc_empty(netdev)) { |
3453 | struct netdev_hw_addr *ha; | 3456 | struct netdev_hw_addr *ha; |
3454 | /* return error if we do not support writing to RAR table */ | 3457 | /* return error if we do not support writing to RAR table */ |
3455 | if (!hw->mac.ops.set_rar) | 3458 | if (!hw->mac.ops.set_rar) |
@@ -4419,7 +4422,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4419 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) | 4422 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) |
4420 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; | 4423 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
4421 | /* Flow Director hash filters enabled */ | 4424 | /* Flow Director hash filters enabled */ |
4422 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4423 | adapter->atr_sample_rate = 20; | 4425 | adapter->atr_sample_rate = 20; |
4424 | adapter->ring_feature[RING_F_FDIR].limit = | 4426 | adapter->ring_feature[RING_F_FDIR].limit = |
4425 | IXGBE_MAX_FDIR_INDICES; | 4427 | IXGBE_MAX_FDIR_INDICES; |
@@ -4490,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4490 | hw->fc.send_xon = true; | 4492 | hw->fc.send_xon = true; |
4491 | hw->fc.disable_fc_autoneg = false; | 4493 | hw->fc.disable_fc_autoneg = false; |
4492 | 4494 | ||
4495 | #ifdef CONFIG_PCI_IOV | ||
4496 | /* assign number of SR-IOV VFs */ | ||
4497 | if (hw->mac.type != ixgbe_mac_82598EB) | ||
4498 | adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs; | ||
4499 | |||
4500 | #endif | ||
4493 | /* enable itr by default in dynamic mode */ | 4501 | /* enable itr by default in dynamic mode */ |
4494 | adapter->rx_itr_setting = 1; | 4502 | adapter->rx_itr_setting = 1; |
4495 | adapter->tx_itr_setting = 1; | 4503 | adapter->tx_itr_setting = 1; |
@@ -6695,12 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
6695 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 6703 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
6696 | struct ixgbe_hw *hw = &adapter->hw; | 6704 | struct ixgbe_hw *hw = &adapter->hw; |
6697 | 6705 | ||
6698 | /* Multiple traffic classes requires multiple queues */ | ||
6699 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | ||
6700 | e_err(drv, "Enable failed, needs MSI-X\n"); | ||
6701 | return -EINVAL; | ||
6702 | } | ||
6703 | |||
6704 | /* Hardware supports up to 8 traffic classes */ | 6706 | /* Hardware supports up to 8 traffic classes */ |
6705 | if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || | 6707 | if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || |
6706 | (hw->mac.type == ixgbe_mac_82598EB && | 6708 | (hw->mac.type == ixgbe_mac_82598EB && |
@@ -6720,7 +6722,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
6720 | ixgbe_set_prio_tc_map(adapter); | 6722 | ixgbe_set_prio_tc_map(adapter); |
6721 | 6723 | ||
6722 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; | 6724 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; |
6723 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
6724 | 6725 | ||
6725 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 6726 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
6726 | adapter->last_lfc_mode = adapter->hw.fc.requested_mode; | 6727 | adapter->last_lfc_mode = adapter->hw.fc.requested_mode; |
@@ -6733,7 +6734,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
6733 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; | 6734 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; |
6734 | 6735 | ||
6735 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 6736 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
6736 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
6737 | 6737 | ||
6738 | adapter->temp_dcb_cfg.pfc_mode_enable = false; | 6738 | adapter->temp_dcb_cfg.pfc_mode_enable = false; |
6739 | adapter->dcb_cfg.pfc_mode_enable = false; | 6739 | adapter->dcb_cfg.pfc_mode_enable = false; |
@@ -6802,20 +6802,40 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
6802 | * Check if Flow Director n-tuple support was enabled or disabled. If | 6802 | * Check if Flow Director n-tuple support was enabled or disabled. If |
6803 | * the state changed, we need to reset. | 6803 | * the state changed, we need to reset. |
6804 | */ | 6804 | */ |
6805 | if (!(features & NETIF_F_NTUPLE)) { | 6805 | switch (features & NETIF_F_NTUPLE) { |
6806 | if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | 6806 | case NETIF_F_NTUPLE: |
6807 | /* turn off Flow Director, set ATR and reset */ | ||
6808 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && | ||
6809 | !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) | ||
6810 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
6811 | need_reset = true; | ||
6812 | } | ||
6813 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
6814 | } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { | ||
6815 | /* turn off ATR, enable perfect filters and reset */ | 6807 | /* turn off ATR, enable perfect filters and reset */ |
6808 | if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | ||
6809 | need_reset = true; | ||
6810 | |||
6816 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 6811 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
6817 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 6812 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
6818 | need_reset = true; | 6813 | break; |
6814 | default: | ||
6815 | /* turn off perfect filters, enable ATR and reset */ | ||
6816 | if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
6817 | need_reset = true; | ||
6818 | |||
6819 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
6820 | |||
6821 | /* We cannot enable ATR if SR-IOV is enabled */ | ||
6822 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6823 | break; | ||
6824 | |||
6825 | /* We cannot enable ATR if we have 2 or more traffic classes */ | ||
6826 | if (netdev_get_num_tc(netdev) > 1) | ||
6827 | break; | ||
6828 | |||
6829 | /* We cannot enable ATR if RSS is disabled */ | ||
6830 | if (adapter->ring_feature[RING_F_RSS].limit <= 1) | ||
6831 | break; | ||
6832 | |||
6833 | /* A sample rate of 0 indicates ATR disabled */ | ||
6834 | if (!adapter->atr_sample_rate) | ||
6835 | break; | ||
6836 | |||
6837 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
6838 | break; | ||
6819 | } | 6839 | } |
6820 | 6840 | ||
6821 | if (features & NETIF_F_HW_VLAN_RX) | 6841 | if (features & NETIF_F_HW_VLAN_RX) |
@@ -6839,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, | |||
6839 | u16 flags) | 6859 | u16 flags) |
6840 | { | 6860 | { |
6841 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 6861 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
6842 | int err = -EOPNOTSUPP; | 6862 | int err; |
6863 | |||
6864 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | ||
6865 | return -EOPNOTSUPP; | ||
6843 | 6866 | ||
6844 | if (ndm->ndm_state & NUD_PERMANENT) { | 6867 | if (ndm->ndm_state & NUD_PERMANENT) { |
6845 | pr_info("%s: FDB only supports static addresses\n", | 6868 | pr_info("%s: FDB only supports static addresses\n", |
@@ -6847,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, | |||
6847 | return -EINVAL; | 6870 | return -EINVAL; |
6848 | } | 6871 | } |
6849 | 6872 | ||
6850 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | 6873 | if (is_unicast_ether_addr(addr)) { |
6851 | if (is_unicast_ether_addr(addr)) | 6874 | u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; |
6875 | |||
6876 | if (netdev_uc_count(dev) < rar_uc_entries) | ||
6852 | err = dev_uc_add_excl(dev, addr); | 6877 | err = dev_uc_add_excl(dev, addr); |
6853 | else if (is_multicast_ether_addr(addr)) | ||
6854 | err = dev_mc_add_excl(dev, addr); | ||
6855 | else | 6878 | else |
6856 | err = -EINVAL; | 6879 | err = -ENOMEM; |
6880 | } else if (is_multicast_ether_addr(addr)) { | ||
6881 | err = dev_mc_add_excl(dev, addr); | ||
6882 | } else { | ||
6883 | err = -EINVAL; | ||
6857 | } | 6884 | } |
6858 | 6885 | ||
6859 | /* Only return duplicate errors if NLM_F_EXCL is set */ | 6886 | /* Only return duplicate errors if NLM_F_EXCL is set */ |
@@ -6942,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
6942 | .ndo_fdb_dump = ixgbe_ndo_fdb_dump, | 6969 | .ndo_fdb_dump = ixgbe_ndo_fdb_dump, |
6943 | }; | 6970 | }; |
6944 | 6971 | ||
6945 | static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | ||
6946 | const struct ixgbe_info *ii) | ||
6947 | { | ||
6948 | #ifdef CONFIG_PCI_IOV | ||
6949 | struct ixgbe_hw *hw = &adapter->hw; | ||
6950 | |||
6951 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
6952 | return; | ||
6953 | |||
6954 | /* The 82599 supports up to 64 VFs per physical function | ||
6955 | * but this implementation limits allocation to 63 so that | ||
6956 | * basic networking resources are still available to the | ||
6957 | * physical function. If the user requests greater thn | ||
6958 | * 63 VFs then it is an error - reset to default of zero. | ||
6959 | */ | ||
6960 | adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs; | ||
6961 | ixgbe_enable_sriov(adapter, ii); | ||
6962 | #endif /* CONFIG_PCI_IOV */ | ||
6963 | } | ||
6964 | |||
6965 | /** | 6972 | /** |
6966 | * ixgbe_wol_supported - Check whether device supports WoL | 6973 | * ixgbe_wol_supported - Check whether device supports WoL |
6967 | * @hw: hw specific details | 6974 | * @hw: hw specific details |
@@ -6988,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, | |||
6988 | if (hw->bus.func != 0) | 6995 | if (hw->bus.func != 0) |
6989 | break; | 6996 | break; |
6990 | case IXGBE_SUBDEV_ID_82599_SFP: | 6997 | case IXGBE_SUBDEV_ID_82599_SFP: |
6998 | case IXGBE_SUBDEV_ID_82599_RNDC: | ||
6991 | is_wol_supported = 1; | 6999 | is_wol_supported = 1; |
6992 | break; | 7000 | break; |
6993 | } | 7001 | } |
@@ -7035,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7035 | int i, err, pci_using_dac; | 7043 | int i, err, pci_using_dac; |
7036 | u8 part_str[IXGBE_PBANUM_LENGTH]; | 7044 | u8 part_str[IXGBE_PBANUM_LENGTH]; |
7037 | unsigned int indices = num_possible_cpus(); | 7045 | unsigned int indices = num_possible_cpus(); |
7046 | unsigned int dcb_max = 0; | ||
7038 | #ifdef IXGBE_FCOE | 7047 | #ifdef IXGBE_FCOE |
7039 | u16 device_caps; | 7048 | u16 device_caps; |
7040 | #endif | 7049 | #endif |
@@ -7084,15 +7093,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7084 | pci_save_state(pdev); | 7093 | pci_save_state(pdev); |
7085 | 7094 | ||
7086 | #ifdef CONFIG_IXGBE_DCB | 7095 | #ifdef CONFIG_IXGBE_DCB |
7087 | indices *= MAX_TRAFFIC_CLASS; | 7096 | if (ii->mac == ixgbe_mac_82598EB) |
7097 | dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, | ||
7098 | IXGBE_MAX_RSS_INDICES); | ||
7099 | else | ||
7100 | dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, | ||
7101 | IXGBE_MAX_FDIR_INDICES); | ||
7088 | #endif | 7102 | #endif |
7089 | 7103 | ||
7090 | if (ii->mac == ixgbe_mac_82598EB) | 7104 | if (ii->mac == ixgbe_mac_82598EB) |
7091 | #ifdef CONFIG_IXGBE_DCB | ||
7092 | indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4); | ||
7093 | #else | ||
7094 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); | 7105 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); |
7095 | #endif | ||
7096 | else | 7106 | else |
7097 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | 7107 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); |
7098 | 7108 | ||
@@ -7100,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7100 | indices += min_t(unsigned int, num_possible_cpus(), | 7110 | indices += min_t(unsigned int, num_possible_cpus(), |
7101 | IXGBE_MAX_FCOE_INDICES); | 7111 | IXGBE_MAX_FCOE_INDICES); |
7102 | #endif | 7112 | #endif |
7113 | indices = max_t(unsigned int, dcb_max, indices); | ||
7103 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); | 7114 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); |
7104 | if (!netdev) { | 7115 | if (!netdev) { |
7105 | err = -ENOMEM; | 7116 | err = -ENOMEM; |
@@ -7206,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7206 | goto err_sw_init; | 7217 | goto err_sw_init; |
7207 | } | 7218 | } |
7208 | 7219 | ||
7209 | ixgbe_probe_vf(adapter, ii); | 7220 | #ifdef CONFIG_PCI_IOV |
7221 | ixgbe_enable_sriov(adapter, ii); | ||
7210 | 7222 | ||
7223 | #endif | ||
7211 | netdev->features = NETIF_F_SG | | 7224 | netdev->features = NETIF_F_SG | |
7212 | NETIF_F_IP_CSUM | | 7225 | NETIF_F_IP_CSUM | |
7213 | NETIF_F_IPV6_CSUM | | 7226 | NETIF_F_IPV6_CSUM | |
@@ -7411,8 +7424,7 @@ err_register: | |||
7411 | ixgbe_release_hw_control(adapter); | 7424 | ixgbe_release_hw_control(adapter); |
7412 | ixgbe_clear_interrupt_scheme(adapter); | 7425 | ixgbe_clear_interrupt_scheme(adapter); |
7413 | err_sw_init: | 7426 | err_sw_init: |
7414 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 7427 | ixgbe_disable_sriov(adapter); |
7415 | ixgbe_disable_sriov(adapter); | ||
7416 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; | 7428 | adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; |
7417 | iounmap(hw->hw_addr); | 7429 | iounmap(hw->hw_addr); |
7418 | err_ioremap: | 7430 | err_ioremap: |
@@ -7465,13 +7477,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
7465 | if (netdev->reg_state == NETREG_REGISTERED) | 7477 | if (netdev->reg_state == NETREG_REGISTERED) |
7466 | unregister_netdev(netdev); | 7478 | unregister_netdev(netdev); |
7467 | 7479 | ||
7468 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | 7480 | ixgbe_disable_sriov(adapter); |
7469 | if (!(ixgbe_check_vf_assignment(adapter))) | ||
7470 | ixgbe_disable_sriov(adapter); | ||
7471 | else | ||
7472 | e_dev_warn("Unloading driver while VFs are assigned " | ||
7473 | "- VFs will not be deallocated\n"); | ||
7474 | } | ||
7475 | 7481 | ||
7476 | ixgbe_clear_interrupt_scheme(adapter); | 7482 | ixgbe_clear_interrupt_scheme(adapter); |
7477 | 7483 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index a825d4808cd2..47b2ce740ec1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -44,50 +44,15 @@ | |||
44 | #include "ixgbe_sriov.h" | 44 | #include "ixgbe_sriov.h" |
45 | 45 | ||
46 | #ifdef CONFIG_PCI_IOV | 46 | #ifdef CONFIG_PCI_IOV |
47 | static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) | ||
48 | { | ||
49 | struct pci_dev *pdev = adapter->pdev; | ||
50 | struct pci_dev *pvfdev; | ||
51 | u16 vf_devfn = 0; | ||
52 | int device_id; | ||
53 | int vfs_found = 0; | ||
54 | |||
55 | switch (adapter->hw.mac.type) { | ||
56 | case ixgbe_mac_82599EB: | ||
57 | device_id = IXGBE_DEV_ID_82599_VF; | ||
58 | break; | ||
59 | case ixgbe_mac_X540: | ||
60 | device_id = IXGBE_DEV_ID_X540_VF; | ||
61 | break; | ||
62 | default: | ||
63 | device_id = 0; | ||
64 | break; | ||
65 | } | ||
66 | |||
67 | vf_devfn = pdev->devfn + 0x80; | ||
68 | pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); | ||
69 | while (pvfdev) { | ||
70 | if (pvfdev->devfn == vf_devfn && | ||
71 | (pvfdev->bus->number >= pdev->bus->number)) | ||
72 | vfs_found++; | ||
73 | vf_devfn += 2; | ||
74 | pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
75 | device_id, pvfdev); | ||
76 | } | ||
77 | |||
78 | return vfs_found; | ||
79 | } | ||
80 | |||
81 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | 47 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, |
82 | const struct ixgbe_info *ii) | 48 | const struct ixgbe_info *ii) |
83 | { | 49 | { |
84 | struct ixgbe_hw *hw = &adapter->hw; | 50 | struct ixgbe_hw *hw = &adapter->hw; |
85 | int err = 0; | ||
86 | int num_vf_macvlans, i; | 51 | int num_vf_macvlans, i; |
87 | struct vf_macvlans *mv_list; | 52 | struct vf_macvlans *mv_list; |
88 | int pre_existing_vfs = 0; | 53 | int pre_existing_vfs = 0; |
89 | 54 | ||
90 | pre_existing_vfs = ixgbe_find_enabled_vfs(adapter); | 55 | pre_existing_vfs = pci_num_vf(adapter->pdev); |
91 | if (!pre_existing_vfs && !adapter->num_vfs) | 56 | if (!pre_existing_vfs && !adapter->num_vfs) |
92 | return; | 57 | return; |
93 | 58 | ||
@@ -106,10 +71,21 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | |||
106 | "enabled for this device - Please reload all " | 71 | "enabled for this device - Please reload all " |
107 | "VF drivers to avoid spoofed packet errors\n"); | 72 | "VF drivers to avoid spoofed packet errors\n"); |
108 | } else { | 73 | } else { |
74 | int err; | ||
75 | /* | ||
76 | * The 82599 supports up to 64 VFs per physical function | ||
77 | * but this implementation limits allocation to 63 so that | ||
78 | * basic networking resources are still available to the | ||
79 | * physical function. If the user requests greater thn | ||
80 | * 63 VFs then it is an error - reset to default of zero. | ||
81 | */ | ||
82 | adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); | ||
83 | |||
109 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); | 84 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); |
110 | if (err) { | 85 | if (err) { |
111 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); | 86 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
112 | goto err_novfs; | 87 | adapter->num_vfs = 0; |
88 | return; | ||
113 | } | 89 | } |
114 | } | 90 | } |
115 | 91 | ||
@@ -193,20 +169,48 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | |||
193 | /* Oh oh */ | 169 | /* Oh oh */ |
194 | e_err(probe, "Unable to allocate memory for VF Data Storage - " | 170 | e_err(probe, "Unable to allocate memory for VF Data Storage - " |
195 | "SRIOV disabled\n"); | 171 | "SRIOV disabled\n"); |
196 | pci_disable_sriov(adapter->pdev); | 172 | ixgbe_disable_sriov(adapter); |
173 | } | ||
197 | 174 | ||
198 | err_novfs: | 175 | static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) |
199 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | 176 | { |
200 | adapter->num_vfs = 0; | 177 | struct pci_dev *pdev = adapter->pdev; |
178 | struct pci_dev *vfdev; | ||
179 | int dev_id; | ||
180 | |||
181 | switch (adapter->hw.mac.type) { | ||
182 | case ixgbe_mac_82599EB: | ||
183 | dev_id = IXGBE_DEV_ID_82599_VF; | ||
184 | break; | ||
185 | case ixgbe_mac_X540: | ||
186 | dev_id = IXGBE_DEV_ID_X540_VF; | ||
187 | break; | ||
188 | default: | ||
189 | return false; | ||
190 | } | ||
191 | |||
192 | /* loop through all the VFs to see if we own any that are assigned */ | ||
193 | vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); | ||
194 | while (vfdev) { | ||
195 | /* if we don't own it we don't care */ | ||
196 | if (vfdev->is_virtfn && vfdev->physfn == pdev) { | ||
197 | /* if it is assigned we cannot release it */ | ||
198 | if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) | ||
199 | return true; | ||
200 | } | ||
201 | |||
202 | vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); | ||
203 | } | ||
204 | |||
205 | return false; | ||
201 | } | 206 | } |
202 | #endif /* #ifdef CONFIG_PCI_IOV */ | ||
203 | 207 | ||
208 | #endif /* #ifdef CONFIG_PCI_IOV */ | ||
204 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | 209 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) |
205 | { | 210 | { |
206 | struct ixgbe_hw *hw = &adapter->hw; | 211 | struct ixgbe_hw *hw = &adapter->hw; |
207 | u32 gpie; | 212 | u32 gpie; |
208 | u32 vmdctl; | 213 | u32 vmdctl; |
209 | int i; | ||
210 | 214 | ||
211 | /* set num VFs to 0 to prevent access to vfinfo */ | 215 | /* set num VFs to 0 to prevent access to vfinfo */ |
212 | adapter->num_vfs = 0; | 216 | adapter->num_vfs = 0; |
@@ -219,7 +223,20 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | |||
219 | kfree(adapter->mv_list); | 223 | kfree(adapter->mv_list); |
220 | adapter->mv_list = NULL; | 224 | adapter->mv_list = NULL; |
221 | 225 | ||
226 | /* if SR-IOV is already disabled then there is nothing to do */ | ||
227 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | ||
228 | return; | ||
229 | |||
222 | #ifdef CONFIG_PCI_IOV | 230 | #ifdef CONFIG_PCI_IOV |
231 | /* | ||
232 | * If our VFs are assigned we cannot shut down SR-IOV | ||
233 | * without causing issues, so just leave the hardware | ||
234 | * available but disabled | ||
235 | */ | ||
236 | if (ixgbe_vfs_are_assigned(adapter)) { | ||
237 | e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); | ||
238 | return; | ||
239 | } | ||
223 | /* disable iov and allow time for transactions to clear */ | 240 | /* disable iov and allow time for transactions to clear */ |
224 | pci_disable_sriov(adapter->pdev); | 241 | pci_disable_sriov(adapter->pdev); |
225 | #endif | 242 | #endif |
@@ -244,12 +261,6 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) | |||
244 | /* take a breather then clean up driver data */ | 261 | /* take a breather then clean up driver data */ |
245 | msleep(100); | 262 | msleep(100); |
246 | 263 | ||
247 | /* Release reference to VF devices */ | ||
248 | for (i = 0; i < adapter->num_vfs; i++) { | ||
249 | if (adapter->vfinfo[i].vfdev) | ||
250 | pci_dev_put(adapter->vfinfo[i].vfdev); | ||
251 | } | ||
252 | |||
253 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | 264 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
254 | } | 265 | } |
255 | 266 | ||
@@ -483,28 +494,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | |||
483 | return 0; | 494 | return 0; |
484 | } | 495 | } |
485 | 496 | ||
486 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) | ||
487 | { | ||
488 | #ifdef CONFIG_PCI_IOV | ||
489 | int i; | ||
490 | for (i = 0; i < adapter->num_vfs; i++) { | ||
491 | if (adapter->vfinfo[i].vfdev->dev_flags & | ||
492 | PCI_DEV_FLAGS_ASSIGNED) | ||
493 | return true; | ||
494 | } | ||
495 | #endif | ||
496 | return false; | ||
497 | } | ||
498 | |||
499 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | 497 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) |
500 | { | 498 | { |
501 | unsigned char vf_mac_addr[6]; | 499 | unsigned char vf_mac_addr[6]; |
502 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); | 500 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
503 | unsigned int vfn = (event_mask & 0x3f); | 501 | unsigned int vfn = (event_mask & 0x3f); |
504 | struct pci_dev *pvfdev; | ||
505 | unsigned int device_id; | ||
506 | u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) | | ||
507 | (pdev->devfn & 1); | ||
508 | 502 | ||
509 | bool enable = ((event_mask & 0x10000000U) != 0); | 503 | bool enable = ((event_mask & 0x10000000U) != 0); |
510 | 504 | ||
@@ -517,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | |||
517 | * for it later. | 511 | * for it later. |
518 | */ | 512 | */ |
519 | memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); | 513 | memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); |
520 | |||
521 | switch (adapter->hw.mac.type) { | ||
522 | case ixgbe_mac_82599EB: | ||
523 | device_id = IXGBE_DEV_ID_82599_VF; | ||
524 | break; | ||
525 | case ixgbe_mac_X540: | ||
526 | device_id = IXGBE_DEV_ID_X540_VF; | ||
527 | break; | ||
528 | default: | ||
529 | device_id = 0; | ||
530 | break; | ||
531 | } | ||
532 | |||
533 | pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); | ||
534 | while (pvfdev) { | ||
535 | if (pvfdev->devfn == thisvf_devfn) | ||
536 | break; | ||
537 | pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
538 | device_id, pvfdev); | ||
539 | } | ||
540 | if (pvfdev) | ||
541 | adapter->vfinfo[vfn].vfdev = pvfdev; | ||
542 | else | ||
543 | e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n", | ||
544 | thisvf_devfn); | ||
545 | } | 514 | } |
546 | 515 | ||
547 | return 0; | 516 | return 0; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2ab38d5fda92..1be1d30e4e78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | |||
@@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, | |||
42 | int vf, struct ifla_vf_info *ivi); | 42 | int vf, struct ifla_vf_info *ivi); |
43 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); | 43 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); |
44 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); | 44 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); |
45 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); | ||
46 | #ifdef CONFIG_PCI_IOV | 45 | #ifdef CONFIG_PCI_IOV |
47 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | 46 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, |
48 | const struct ixgbe_info *ii); | 47 | const struct ixgbe_info *ii); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fe0a19d91d4a..400f86a31174 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a | 54 | #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a |
55 | #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 | 55 | #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 |
56 | #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 | 56 | #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 |
57 | #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 | ||
57 | #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 | 58 | #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 |
58 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 | 59 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 |
59 | #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D | 60 | #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e167d1bb6dea..98cadb0c4dab 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | |||
@@ -249,6 +249,8 @@ struct ixgbevf_adapter { | |||
249 | bool link_up; | 249 | bool link_up; |
250 | 250 | ||
251 | struct work_struct watchdog_task; | 251 | struct work_struct watchdog_task; |
252 | |||
253 | spinlock_t mbx_lock; | ||
252 | }; | 254 | }; |
253 | 255 | ||
254 | enum ixbgevf_state_t { | 256 | enum ixbgevf_state_t { |
@@ -284,7 +286,6 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, | |||
284 | extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, | 286 | extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, |
285 | struct ixgbevf_ring *); | 287 | struct ixgbevf_ring *); |
286 | extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); | 288 | extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); |
287 | void ixgbevf_write_eitr(struct ixgbevf_q_vector *); | ||
288 | extern int ethtool_ioctl(struct ifreq *ifr); | 289 | extern int ethtool_ioctl(struct ifreq *ifr); |
289 | 290 | ||
290 | extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); | 291 | extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2dc78d7e297a..3f9841d619ad 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -540,6 +540,25 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) | |||
540 | return 0; | 540 | return 0; |
541 | } | 541 | } |
542 | 542 | ||
543 | /** | ||
544 | * ixgbevf_write_eitr - write VTEITR register in hardware specific way | ||
545 | * @q_vector: structure containing interrupt and ring information | ||
546 | */ | ||
547 | static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) | ||
548 | { | ||
549 | struct ixgbevf_adapter *adapter = q_vector->adapter; | ||
550 | struct ixgbe_hw *hw = &adapter->hw; | ||
551 | int v_idx = q_vector->v_idx; | ||
552 | u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; | ||
553 | |||
554 | /* | ||
555 | * set the WDIS bit to not clear the timer bits and cause an | ||
556 | * immediate assertion of the interrupt | ||
557 | */ | ||
558 | itr_reg |= IXGBE_EITR_CNT_WDIS; | ||
559 | |||
560 | IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); | ||
561 | } | ||
543 | 562 | ||
544 | /** | 563 | /** |
545 | * ixgbevf_configure_msix - Configure MSI-X hardware | 564 | * ixgbevf_configure_msix - Configure MSI-X hardware |
@@ -662,30 +681,6 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, | |||
662 | ring_container->itr = itr_setting; | 681 | ring_container->itr = itr_setting; |
663 | } | 682 | } |
664 | 683 | ||
665 | /** | ||
666 | * ixgbevf_write_eitr - write VTEITR register in hardware specific way | ||
667 | * @q_vector: structure containing interrupt and ring information | ||
668 | * | ||
669 | * This function is made to be called by ethtool and by the driver | ||
670 | * when it needs to update VTEITR registers at runtime. Hardware | ||
671 | * specific quirks/differences are taken care of here. | ||
672 | */ | ||
673 | void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) | ||
674 | { | ||
675 | struct ixgbevf_adapter *adapter = q_vector->adapter; | ||
676 | struct ixgbe_hw *hw = &adapter->hw; | ||
677 | int v_idx = q_vector->v_idx; | ||
678 | u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; | ||
679 | |||
680 | /* | ||
681 | * set the WDIS bit to not clear the timer bits and cause an | ||
682 | * immediate assertion of the interrupt | ||
683 | */ | ||
684 | itr_reg |= IXGBE_EITR_CNT_WDIS; | ||
685 | |||
686 | IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); | ||
687 | } | ||
688 | |||
689 | static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) | 684 | static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) |
690 | { | 685 | { |
691 | u32 new_itr = q_vector->itr; | 686 | u32 new_itr = q_vector->itr; |
@@ -1120,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
1120 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 1115 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
1121 | struct ixgbe_hw *hw = &adapter->hw; | 1116 | struct ixgbe_hw *hw = &adapter->hw; |
1122 | 1117 | ||
1118 | spin_lock(&adapter->mbx_lock); | ||
1119 | |||
1123 | /* add VID to filter table */ | 1120 | /* add VID to filter table */ |
1124 | if (hw->mac.ops.set_vfta) | 1121 | if (hw->mac.ops.set_vfta) |
1125 | hw->mac.ops.set_vfta(hw, vid, 0, true); | 1122 | hw->mac.ops.set_vfta(hw, vid, 0, true); |
1123 | |||
1124 | spin_unlock(&adapter->mbx_lock); | ||
1125 | |||
1126 | set_bit(vid, adapter->active_vlans); | 1126 | set_bit(vid, adapter->active_vlans); |
1127 | 1127 | ||
1128 | return 0; | 1128 | return 0; |
@@ -1133,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
1133 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 1133 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
1134 | struct ixgbe_hw *hw = &adapter->hw; | 1134 | struct ixgbe_hw *hw = &adapter->hw; |
1135 | 1135 | ||
1136 | spin_lock(&adapter->mbx_lock); | ||
1137 | |||
1136 | /* remove VID from filter table */ | 1138 | /* remove VID from filter table */ |
1137 | if (hw->mac.ops.set_vfta) | 1139 | if (hw->mac.ops.set_vfta) |
1138 | hw->mac.ops.set_vfta(hw, vid, 0, false); | 1140 | hw->mac.ops.set_vfta(hw, vid, 0, false); |
1141 | |||
1142 | spin_unlock(&adapter->mbx_lock); | ||
1143 | |||
1139 | clear_bit(vid, adapter->active_vlans); | 1144 | clear_bit(vid, adapter->active_vlans); |
1140 | 1145 | ||
1141 | return 0; | 1146 | return 0; |
@@ -1190,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) | |||
1190 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 1195 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
1191 | struct ixgbe_hw *hw = &adapter->hw; | 1196 | struct ixgbe_hw *hw = &adapter->hw; |
1192 | 1197 | ||
1198 | spin_lock(&adapter->mbx_lock); | ||
1199 | |||
1193 | /* reprogram multicast list */ | 1200 | /* reprogram multicast list */ |
1194 | if (hw->mac.ops.update_mc_addr_list) | 1201 | if (hw->mac.ops.update_mc_addr_list) |
1195 | hw->mac.ops.update_mc_addr_list(hw, netdev); | 1202 | hw->mac.ops.update_mc_addr_list(hw, netdev); |
1196 | 1203 | ||
1197 | ixgbevf_write_uc_addr_list(netdev); | 1204 | ixgbevf_write_uc_addr_list(netdev); |
1205 | |||
1206 | spin_unlock(&adapter->mbx_lock); | ||
1198 | } | 1207 | } |
1199 | 1208 | ||
1200 | static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) | 1209 | static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) |
@@ -1339,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) | |||
1339 | 1348 | ||
1340 | ixgbevf_configure_msix(adapter); | 1349 | ixgbevf_configure_msix(adapter); |
1341 | 1350 | ||
1351 | spin_lock(&adapter->mbx_lock); | ||
1352 | |||
1342 | if (hw->mac.ops.set_rar) { | 1353 | if (hw->mac.ops.set_rar) { |
1343 | if (is_valid_ether_addr(hw->mac.addr)) | 1354 | if (is_valid_ether_addr(hw->mac.addr)) |
1344 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); | 1355 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); |
@@ -1350,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) | |||
1350 | msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1361 | msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
1351 | hw->mbx.ops.write_posted(hw, msg, 2); | 1362 | hw->mbx.ops.write_posted(hw, msg, 2); |
1352 | 1363 | ||
1364 | spin_unlock(&adapter->mbx_lock); | ||
1365 | |||
1353 | clear_bit(__IXGBEVF_DOWN, &adapter->state); | 1366 | clear_bit(__IXGBEVF_DOWN, &adapter->state); |
1354 | ixgbevf_napi_enable_all(adapter); | 1367 | ixgbevf_napi_enable_all(adapter); |
1355 | 1368 | ||
@@ -1562,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) | |||
1562 | struct ixgbe_hw *hw = &adapter->hw; | 1575 | struct ixgbe_hw *hw = &adapter->hw; |
1563 | struct net_device *netdev = adapter->netdev; | 1576 | struct net_device *netdev = adapter->netdev; |
1564 | 1577 | ||
1578 | spin_lock(&adapter->mbx_lock); | ||
1579 | |||
1565 | if (hw->mac.ops.reset_hw(hw)) | 1580 | if (hw->mac.ops.reset_hw(hw)) |
1566 | hw_dbg(hw, "PF still resetting\n"); | 1581 | hw_dbg(hw, "PF still resetting\n"); |
1567 | else | 1582 | else |
1568 | hw->mac.ops.init_hw(hw); | 1583 | hw->mac.ops.init_hw(hw); |
1569 | 1584 | ||
1585 | spin_unlock(&adapter->mbx_lock); | ||
1586 | |||
1570 | if (is_valid_ether_addr(adapter->hw.mac.addr)) { | 1587 | if (is_valid_ether_addr(adapter->hw.mac.addr)) { |
1571 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, | 1588 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, |
1572 | netdev->addr_len); | 1589 | netdev->addr_len); |
@@ -1893,6 +1910,9 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) | |||
1893 | adapter->netdev->addr_len); | 1910 | adapter->netdev->addr_len); |
1894 | } | 1911 | } |
1895 | 1912 | ||
1913 | /* lock to protect mailbox accesses */ | ||
1914 | spin_lock_init(&adapter->mbx_lock); | ||
1915 | |||
1896 | /* Enable dynamic interrupt throttling rates */ | 1916 | /* Enable dynamic interrupt throttling rates */ |
1897 | adapter->rx_itr_setting = 1; | 1917 | adapter->rx_itr_setting = 1; |
1898 | adapter->tx_itr_setting = 1; | 1918 | adapter->tx_itr_setting = 1; |
@@ -2032,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work) | |||
2032 | * no LSC interrupt | 2052 | * no LSC interrupt |
2033 | */ | 2053 | */ |
2034 | if (hw->mac.ops.check_link) { | 2054 | if (hw->mac.ops.check_link) { |
2035 | if ((hw->mac.ops.check_link(hw, &link_speed, | 2055 | s32 need_reset; |
2036 | &link_up, false)) != 0) { | 2056 | |
2057 | spin_lock(&adapter->mbx_lock); | ||
2058 | |||
2059 | need_reset = hw->mac.ops.check_link(hw, &link_speed, | ||
2060 | &link_up, false); | ||
2061 | |||
2062 | spin_unlock(&adapter->mbx_lock); | ||
2063 | |||
2064 | if (need_reset) { | ||
2037 | adapter->link_up = link_up; | 2065 | adapter->link_up = link_up; |
2038 | adapter->link_speed = link_speed; | 2066 | adapter->link_speed = link_speed; |
2039 | netif_carrier_off(netdev); | 2067 | netif_carrier_off(netdev); |
@@ -2813,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) | |||
2813 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2841 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
2814 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | 2842 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
2815 | 2843 | ||
2844 | spin_lock(&adapter->mbx_lock); | ||
2845 | |||
2816 | if (hw->mac.ops.set_rar) | 2846 | if (hw->mac.ops.set_rar) |
2817 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); | 2847 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); |
2818 | 2848 | ||
2849 | spin_unlock(&adapter->mbx_lock); | ||
2850 | |||
2819 | return 0; | 2851 | return 0; |
2820 | } | 2852 | } |
2821 | 2853 | ||
@@ -3152,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev) | |||
3152 | pci_disable_device(pdev); | 3184 | pci_disable_device(pdev); |
3153 | } | 3185 | } |
3154 | 3186 | ||
3187 | /** | ||
3188 | * ixgbevf_io_error_detected - called when PCI error is detected | ||
3189 | * @pdev: Pointer to PCI device | ||
3190 | * @state: The current pci connection state | ||
3191 | * | ||
3192 | * This function is called after a PCI bus error affecting | ||
3193 | * this device has been detected. | ||
3194 | */ | ||
3195 | static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, | ||
3196 | pci_channel_state_t state) | ||
3197 | { | ||
3198 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
3199 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
3200 | |||
3201 | netif_device_detach(netdev); | ||
3202 | |||
3203 | if (state == pci_channel_io_perm_failure) | ||
3204 | return PCI_ERS_RESULT_DISCONNECT; | ||
3205 | |||
3206 | if (netif_running(netdev)) | ||
3207 | ixgbevf_down(adapter); | ||
3208 | |||
3209 | pci_disable_device(pdev); | ||
3210 | |||
3211 | /* Request a slot reset. */ | ||
3212 | return PCI_ERS_RESULT_NEED_RESET; | ||
3213 | } | ||
3214 | |||
3215 | /** | ||
3216 | * ixgbevf_io_slot_reset - called after the pci bus has been reset. | ||
3217 | * @pdev: Pointer to PCI device | ||
3218 | * | ||
3219 | * Restart the card from scratch, as if from a cold-boot. Implementation | ||
3220 | * resembles the first-half of the ixgbevf_resume routine. | ||
3221 | */ | ||
3222 | static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) | ||
3223 | { | ||
3224 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
3225 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
3226 | |||
3227 | if (pci_enable_device_mem(pdev)) { | ||
3228 | dev_err(&pdev->dev, | ||
3229 | "Cannot re-enable PCI device after reset.\n"); | ||
3230 | return PCI_ERS_RESULT_DISCONNECT; | ||
3231 | } | ||
3232 | |||
3233 | pci_set_master(pdev); | ||
3234 | |||
3235 | ixgbevf_reset(adapter); | ||
3236 | |||
3237 | return PCI_ERS_RESULT_RECOVERED; | ||
3238 | } | ||
3239 | |||
3240 | /** | ||
3241 | * ixgbevf_io_resume - called when traffic can start flowing again. | ||
3242 | * @pdev: Pointer to PCI device | ||
3243 | * | ||
3244 | * This callback is called when the error recovery driver tells us that | ||
3245 | * its OK to resume normal operation. Implementation resembles the | ||
3246 | * second-half of the ixgbevf_resume routine. | ||
3247 | */ | ||
3248 | static void ixgbevf_io_resume(struct pci_dev *pdev) | ||
3249 | { | ||
3250 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
3251 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
3252 | |||
3253 | if (netif_running(netdev)) | ||
3254 | ixgbevf_up(adapter); | ||
3255 | |||
3256 | netif_device_attach(netdev); | ||
3257 | } | ||
3258 | |||
3259 | /* PCI Error Recovery (ERS) */ | ||
3260 | static struct pci_error_handlers ixgbevf_err_handler = { | ||
3261 | .error_detected = ixgbevf_io_error_detected, | ||
3262 | .slot_reset = ixgbevf_io_slot_reset, | ||
3263 | .resume = ixgbevf_io_resume, | ||
3264 | }; | ||
3265 | |||
3155 | static struct pci_driver ixgbevf_driver = { | 3266 | static struct pci_driver ixgbevf_driver = { |
3156 | .name = ixgbevf_driver_name, | 3267 | .name = ixgbevf_driver_name, |
3157 | .id_table = ixgbevf_pci_tbl, | 3268 | .id_table = ixgbevf_pci_tbl, |
3158 | .probe = ixgbevf_probe, | 3269 | .probe = ixgbevf_probe, |
3159 | .remove = __devexit_p(ixgbevf_remove), | 3270 | .remove = __devexit_p(ixgbevf_remove), |
3160 | .shutdown = ixgbevf_shutdown, | 3271 | .shutdown = ixgbevf_shutdown, |
3272 | .err_handler = &ixgbevf_err_handler | ||
3161 | }; | 3273 | }; |
3162 | 3274 | ||
3163 | /** | 3275 | /** |