Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/Makefile        |   3
-rw-r--r--  drivers/net/ixgbe/ixgbe.h         |  54
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c   | 233
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c  |  19
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h  |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 195
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c    |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c    | 676
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c     | 479
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h     |  96
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c   | 362
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h   |  47
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h    |  66
13 files changed, 1944 insertions(+), 292 deletions(-)
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9..8f81efb4916 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
-              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+              ixgbe_mbx.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b6..19e94ee155a 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
 
 #define IXGBE_MAX_RSC_INT_RATE 162760
 
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS  64
+#define IXGBE_MAX_VFTA_ENTRIES  128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	bool clear_to_send;
+	int rar;
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
 
 	struct ixgbe_queue_stats stats;
 	unsigned long reinit_state;
+	int numa_node;
 	u64 rsc_count;		/* stat for coalesced packets */
 	u64 rsc_flush;		/* stats for flushed packets */
 	u32 restart_queue;	/* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
 enum ixgbe_ring_f_enum {
 	RING_F_NONE = 0,
 	RING_F_DCB,
-	RING_F_VMDQ,
+	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
 	RING_F_RSS,
 	RING_F_FDIR,
 #ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
 
 #define IXGBE_MAX_DCB_INDICES   8
 #define IXGBE_MAX_RSS_INDICES  16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES  8
@@ -277,7 +294,7 @@ struct ixgbe_adapter {
 	u16 eitr_high;
 
 	/* TX */
-	struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
 	u32 tx_timeout_count;
 	bool detect_tx_hung;
@@ -286,8 +303,10 @@ struct ixgbe_adapter {
 	u64 lsc_int;
 
 	/* RX */
-	struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_rx_queues;
+	int num_rx_pools;		/* == num_rx_queues in 82598 */
+	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
 	u64 non_eop_descs;
@@ -323,13 +342,14 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 19)
 #define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 20)
 #define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK             (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK             (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK              (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 30)
 
 	u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1)
@@ -379,6 +399,13 @@ struct ixgbe_adapter {
 	u64 rsc_total_flush;
 	u32 wol;
 	u16 eeprom_version;
+
+	int node;
+
+	/* SR-IOV */
+	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+	unsigned int num_vfs;
+	struct vf_data_storage *vfinfo;
 };
 
 enum ixbge_state_t {
@@ -426,6 +453,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                                  struct ixgbe_atr_input *input,
                                                  u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                               struct ixgbe_atr_input *input,
+                                               struct ixgbe_atr_input_masks *input_masks,
+                                               u16 soft_id, u8 queue);
 extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
                                        u16 vlan_id);
 extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
@@ -440,6 +471,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
                                          u16 flex_byte);
 extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
                                       u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_adapter *adapter,
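The VMDQ_P() macro added above captures how SR-IOV repartitions the 82599's 64 pools: VF n owns pool n, and the PF's own VMDq pools start after the last VF. A minimal sketch of the mapping (illustrative only; like the macro itself, it assumes an ixgbe_adapter named adapter is in scope):

    /* pools 0 .. num_vfs-1 belong to the VFs; the PF reaches its own
     * pools through VMDQ_P(), which simply offsets past the VFs */
    static unsigned int pf_pool_index(struct ixgbe_adapter *adapter,
                                      unsigned int p)
    {
            return p + adapter->num_vfs;    /* == VMDQ_P(p) */
    }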
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb..1f30e163bd9 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
 
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
 
 #define IXGBE_82599_MAX_TX_QUEUES 128
 #define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
-	u32 ctrl, ctrl_ext;
+	u32 ctrl;
 	u32 i;
 	u32 autoc;
 	u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
-	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
-	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
-	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 
 	msleep(50);
 
-
-
 	/*
 	 * Store the original AUTOC/AUTOC2 values if they have not been
 	 * stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                 bool vlan_on)
 {
 	u32 regindex;
+	u32 vlvf_index;
 	u32 bitindex;
 	u32 bits;
 	u32 first_empty_slot;
+	u32 vt_ctl;
 
 	if (vlan > 4095)
 		return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
 
 
 	/* Part 2
-	 * If the vind is set
+	 * If VT mode is set
 	 *   Either vlan_on
 	 *     make sure the vlan is in VLVF
 	 *     set the vind bit in the matching VLVFB
 	 *   Or !vlan_on
 	 *     clear the pool bit and possibly the vind
 	 */
-	if (vind) {
-		/* find the vlanid or the first empty slot */
-		first_empty_slot = 0;
-
-		for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
-			bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-			if (!bits && !first_empty_slot)
-				first_empty_slot = regindex;
-			else if ((bits & 0x0FFF) == vlan)
-				break;
-		}
-
-		if (regindex >= IXGBE_VLVF_ENTRIES) {
-			if (first_empty_slot)
-				regindex = first_empty_slot;
-			else {
-				hw_dbg(hw, "No space in VLVF.\n");
-				goto out;
-			}
-		}
-
-		if (vlan_on) {
-			/* set the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(regindex * 2));
-				bits |= (1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB(regindex * 2), bits);
-			} else {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB((regindex * 2) + 1));
-				bits |= (1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB((regindex * 2) + 1), bits);
-			}
-		} else {
-			/* clear the pool bit */
-			if (vind < 32) {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(regindex * 2));
-				bits &= ~(1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB(regindex * 2), bits);
-				bits |= IXGBE_READ_REG(hw,
-						IXGBE_VLVFB((regindex * 2) + 1));
-			} else {
-				bits = IXGBE_READ_REG(hw,
-						IXGBE_VLVFB((regindex * 2) + 1));
-				bits &= ~(1 << vind);
-				IXGBE_WRITE_REG(hw,
-						IXGBE_VLVFB((regindex * 2) + 1), bits);
-				bits |= IXGBE_READ_REG(hw,
-						IXGBE_VLVFB(regindex * 2));
-			}
-		}
-
-		if (bits)
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
-					(IXGBE_VLVF_VIEN | vlan));
-		else
-			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
-	}
+	vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+		goto out;
+
+	/* find the vlanid or the first empty slot */
+	first_empty_slot = 0;
+
+	for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+		if (!bits && !first_empty_slot)
+			first_empty_slot = vlvf_index;
+		else if ((bits & 0x0FFF) == vlan)
+			break;
+	}
+
+	if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+		if (first_empty_slot)
+			vlvf_index = first_empty_slot;
+		else {
+			hw_dbg(hw, "No space in VLVF.\n");
+			goto out;
+		}
+	}
+
+	if (vlan_on) {
+		/* set the pool bit */
+		if (vind < 32) {
+			bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2));
+			bits |= (1 << vind);
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2), bits);
+		} else {
+			bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+			bits |= (1 << (vind - 32));
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+		}
+	} else {
+		/* clear the pool bit */
+		if (vind < 32) {
+			bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2));
+			bits &= ~(1 << vind);
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2), bits);
+			bits |= IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+		} else {
+			bits = IXGBE_READ_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1));
+			bits &= ~(1 << (vind - 32));
+			IXGBE_WRITE_REG(hw,
+					IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+			bits |= IXGBE_READ_REG(hw,
+					IXGBE_VLVFB(vlvf_index * 2));
+		}
+	}
+
+	if (bits) {
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+				(IXGBE_VLVF_VIEN | vlan));
+		/* if bits is non-zero then some pools/VFs are still
+		 * using this VLAN ID.  Force the VFTA entry to on */
+		bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+		bits |= (1 << bitindex);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+	}
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
 
 out:
 	return 0;
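Each VLVF entry carries a 64-bit pool-enable bitmap split across a pair of 32-bit VLVFB registers, and the rewrite above also corrects the high-word case: the old code shifted by vind even when vind >= 32, while the new code shifts by (vind - 32). A hypothetical helper, not in the patch, showing the intended word/bit arithmetic:

    /* pool-enable flag for pool 'vind' of VLVF entry 'vlvf_index':
     * pools 0-31 live in VLVFB(2 * vlvf_index),
     * pools 32-63 in VLVFB(2 * vlvf_index + 1) */
    static void vlvfb_locate(u32 vlvf_index, u32 vind, u32 *reg, u32 *bit)
    {
            *reg = IXGBE_VLVFB((vlvf_index * 2) + (vind / 32));
            *bit = 1 << (vind % 32);
    }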
@@ -1434,6 +1439,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
 	/* Send interrupt when 64 filters are left */
 	fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
 
+	/* Initialize the drop queue to Rx queue 127 */
+	fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
 	switch (pballoc) {
 	case IXGBE_FDIR_PBALLOC_64K:
 		/* 2k - 1 perfect filters */
@@ -1675,8 +1683,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
  * @src_addr_4: the fourth 4 bytes of the IP address to load
  **/
 s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 src_addr_1, u32 src_addr_2,
-                                 u32 src_addr_3, u32 src_addr_4)
+				 u32 src_addr_1, u32 src_addr_2,
+				 u32 src_addr_3, u32 src_addr_4)
 {
 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
 	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1726,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
  * @dst_addr_4: the fourth 4 bytes of the IP address to load
  **/
 s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
-                                 u32 dst_addr_1, u32 dst_addr_2,
-                                 u32 dst_addr_3, u32 dst_addr_4)
+				 u32 dst_addr_1, u32 dst_addr_2,
+				 u32 dst_addr_3, u32 dst_addr_4)
 {
 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
 	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1797,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
  * @vm_pool: the Virtual Machine pool to load
  **/
 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
-                                u8 vm_pool)
+				u8 vm_pool)
 {
 	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
 
@@ -1821,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
  * @input: input stream to search
  * @vlan: the VLAN id to load
  **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
-                                       u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
 {
 	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
 	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  * @hw: pointer to hardware structure
  * @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
  * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                        struct ixgbe_atr_input *input,
-                                        u16 soft_id,
-                                        u8 queue)
+					struct ixgbe_atr_input *input,
+					struct ixgbe_atr_input_masks *input_masks,
+					u16 soft_id, u8 queue)
 {
 	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4, dst_ipv4;
+	u32 src_ipv4 = 0, dst_ipv4 = 0;
 	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
 	u16 src_port, dst_port, vlan_id, flex_bytes;
 	u16 bucket_hash;
 	u8 l4type;
+	u8 fdirm = 0;
 
 	/* Get our input values */
 	ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
 		/* IPv4 */
 		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
 	}
 
 	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
 	                (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
 	                (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+	/*
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
+	 *
+	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
+	 * point in time.
+	 */
+	if (src_ipv4 == 0)
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+	if (dst_ipv4 == 0)
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+	case IXGBE_ATR_L4TYPE_TCP:
+		if (src_port == 0)
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+					input_masks->src_port_mask);
+
+		if (dst_port == 0)
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+					(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+					 (0xffff << 16)));
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+					(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+					 (input_masks->dst_port_mask << 16)));
+		break;
+	case IXGBE_ATR_L4TYPE_UDP:
+		if (src_port == 0)
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+					input_masks->src_port_mask);
+
+		if (dst_port == 0)
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+					(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+					 (0xffff << 16)));
+		else
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+					(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+					 (input_masks->src_port_mask << 16)));
+		break;
+	default:
+		/* this already would have failed above */
+		break;
+	}
+
+	/* Program the last mask register, FDIRM */
+	if (input_masks->vlan_id_mask || !vlan_id)
+		/* Mask both VLAN and VLANP - bits 0 and 1 */
+		fdirm |= 0x3;
+
+	if (input_masks->data_mask || !flex_bytes)
+		/* Flex bytes need masking, so mask the whole thing - bit 4 */
+		fdirm |= 0x10;
+
+	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+	fdirm |= 0x24;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
 	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
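The mask programming above follows one convention per field: a match value of zero means "wildcard", which the hardware expresses as an all-ones mask, while a non-zero value takes the caller's mask verbatim. A sketch of that rule under a hypothetical helper name (it is not in the patch):

    static u32 fdir_field_mask(u32 match, u32 user_mask)
    {
            return match ? user_mask : 0xffffffff;  /* 0 => match anything */
    }

In FDIRM, bits 0 and 1 mask the VLAN id and priority, bit 2 the destination IPv6 address, bit 4 the flex bytes, and bit 5 the VM pool, which is how the 0x3, 0x10, and 0x24 constants above decompose.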
@@ -2655,4 +2735,5 @@ struct ixgbe_info ixgbe_82599_info = {
 	.mac_ops   = &mac_ops_82599,
 	.eeprom_ops = &eeprom_ops_82599,
 	.phy_ops   = &phy_ops_82599,
+	.mbx_ops   = &mbx_ops_82599,
 };
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd..eb49020903c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
-#include <linux/list.h>
 #include <linux/netdevice.h>
 
 #include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
 		/* Get the MAC address from the RAR0 for later reference */
 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
 
-		hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
-		       hw->mac.addr[0], hw->mac.addr[1],
-		       hw->mac.addr[2]);
-		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-		       hw->mac.addr[4], hw->mac.addr[5]);
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
 	} else {
 		/* Setup the receive address. */
 		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
-		hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
-		       hw->mac.addr[0], hw->mac.addr[1],
-		       hw->mac.addr[2]);
-		hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
-		       hw->mac.addr[4], hw->mac.addr[5]);
+		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 	}
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
 /**
  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
  *  @hw: pointer to hardware structure
- *  @uc_list: the list of new addresses
+ *  @netdev: pointer to net device structure
  *
  *  The given list replaces any existing list. Clears the secondary addrs from
  *  receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
  *  manually putting the device into promiscuous mode.
  **/
 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                      struct list_head *uc_list)
+                                      struct net_device *netdev)
 {
 	u32 i;
 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
 	}
 
 	/* Add the new addresses */
-	list_for_each_entry(ha, uc_list, list) {
+	netdev_for_each_uc_addr(ha, netdev) {
 		hw_dbg(hw, " Adding the secondary addresses:\n");
 		ixgbe_add_uc_addr(hw, ha->addr, 0);
 	}
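The hw_dbg() consolidation above relies on %pM, the kernel's vsnprintf extension that renders a 6-byte MAC address as colon-separated hex in a single format specifier. A minimal sketch:

    u8 mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
    printk(KERN_DEBUG "addr = %pM\n", mac);  /* "addr = 00:1b:21:aa:bb:cc" */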
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa50..13606d4809c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                                       u32 mc_addr_count,
                                       ixgbe_mc_addr_itr func);
 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
-                                      struct list_head *uc_list);
+                                      struct net_device *netdev);
 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f..7949a446e4c 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -441,10 +441,8 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
 		netdev->features |= NETIF_F_TSO;
 		netdev->features |= NETIF_F_TSO6;
 	} else {
-		netif_tx_stop_all_queues(netdev);
 		netdev->features &= ~NETIF_F_TSO;
 		netdev->features &= ~NETIF_F_TSO6;
-		netif_tx_start_all_queues(netdev);
 	}
 	return 0;
 }
@@ -834,8 +832,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *tx_ring = adapter->tx_ring;
-	struct ixgbe_ring *rx_ring = adapter->rx_ring;
+	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 
 	ring->rx_max_pending = IXGBE_MAX_RXD;
 	ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +865,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
 
-	if ((new_tx_count == adapter->tx_ring->count) &&
-	    (new_rx_count == adapter->rx_ring->count)) {
+	if ((new_tx_count == adapter->tx_ring[0]->count) &&
+	    (new_rx_count == adapter->rx_ring[0]->count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -878,25 +876,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
-		goto err_setup;
+		goto clear_reset;
 	}
 
-	temp_tx_ring = kcalloc(adapter->num_tx_queues,
-	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 	if (!temp_tx_ring) {
 		err = -ENOMEM;
-		goto err_setup;
+		goto clear_reset;
 	}
 
 	if (new_tx_count != adapter->tx_ring_count) {
-		memcpy(temp_tx_ring, adapter->tx_ring,
-		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
 			err = ixgbe_setup_tx_resources(adapter,
 			                               &temp_tx_ring[i]);
@@ -904,28 +901,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
 					                        &temp_tx_ring[i]);
 				}
-				goto err_setup;
+				goto clear_reset;
 			}
 		}
 		need_update = true;
 	}
 
-	temp_rx_ring = kcalloc(adapter->num_rx_queues,
-	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if ((!temp_rx_ring) && (need_update)) {
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
-		kfree(temp_tx_ring);
+	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+	if (!temp_rx_ring) {
 		err = -ENOMEM;
 		goto err_setup;
 	}
 
 	if (new_rx_count != adapter->rx_ring_count) {
-		memcpy(temp_rx_ring, adapter->rx_ring,
-		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
 			err = ixgbe_setup_rx_resources(adapter,
 			                               &temp_rx_ring[i]);
@@ -947,22 +940,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
-			kfree(adapter->tx_ring);
-			adapter->tx_ring = temp_tx_ring;
-			temp_tx_ring = NULL;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				ixgbe_free_tx_resources(adapter,
+				                        adapter->tx_ring[i]);
+				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->tx_ring_count = new_tx_count;
 		}
 
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
-			kfree(adapter->rx_ring);
-			adapter->rx_ring = temp_rx_ring;
-			temp_rx_ring = NULL;
+			for (i = 0; i < adapter->num_rx_queues; i++) {
+				ixgbe_free_rx_resources(adapter,
+				                        adapter->rx_ring[i]);
+				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+				       sizeof(struct ixgbe_ring));
+			}
 			adapter->rx_ring_count = new_rx_count;
 		}
 		ixgbe_up(adapter);
 	}
+
+	vfree(temp_rx_ring);
 err_setup:
+	vfree(temp_tx_ring);
+clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
@@ -974,6 +977,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 		return IXGBE_TEST_LEN;
 	case ETH_SS_STATS:
 		return IXGBE_STATS_LEN;
+	case ETH_SS_NTUPLE_FILTERS:
+		return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+		        ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1007,13 +1013,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+		queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
 		for (k = 0; k < stat_count; k++)
 			data[i + k] = queue_stat[k];
 		i += k;
@@ -1627,7 +1633,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	reg_data |= IXGBE_RXDCTL_ENABLE;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
 	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0].reg_idx;
+		int j = adapter->rx_ring[0]->reg_idx;
 		u32 k;
 		for (k = 0; k < 10; k++) {
 			if (IXGBE_READ_REG(&adapter->hw,
@@ -1867,11 +1873,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		if (ixgbe_intr_test(adapter, &data[2]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		/* If SRIOV or VMDq is enabled then skip MAC
+		 * loopback diagnostic. */
+		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+		                      IXGBE_FLAG_VMDQ_ENABLED)) {
+			DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+				"mode\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+
 		ixgbe_reset(adapter);
 		DPRINTK(HW, INFO, "loopback testing starting\n");
 		if (ixgbe_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+skip_loopback:
 		ixgbe_reset(adapter);
 
 		clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -2000,7 +2017,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
 
 	/* only valid if in constant ITR mode */
 	switch (adapter->rx_itr_setting) {
@@ -2053,7 +2070,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
 		/* check the limits */
@@ -2134,23 +2151,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	bool need_reset = false;
 
 	ethtool_op_set_flags(netdev, data);
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
-		return 0;
-
 	/* if state changes we need to update adapter->flags and reset */
 	if ((!!(data & ETH_FLAG_LRO)) !=
 	    (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
 		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+		need_reset = true;
+	}
+
+	/*
+	 * Check if Flow Director n-tuple support was enabled or disabled.  If
+	 * the state changed, we need to reset.
+	 */
+	if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+	    (!(data & ETH_FLAG_NTUPLE))) {
+		/* turn off Flow Director perfect, set hash and reset */
+		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		need_reset = true;
+	} else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+	           (data & ETH_FLAG_NTUPLE)) {
+		/* turn off Flow Director hash, enable perfect and reset */
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+		need_reset = true;
+	} else {
+		/* no state change */
+	}
+
+	if (need_reset) {
 		if (netif_running(netdev))
 			ixgbe_reinit_locked(adapter);
 		else
 			ixgbe_reset(adapter);
 	}
+
 	return 0;
+}
 
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+                               struct ethtool_rx_ntuple *cmd)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+	struct ixgbe_atr_input input_struct;
+	struct ixgbe_atr_input_masks input_masks;
+	int target_queue;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+		return -EOPNOTSUPP;
+
+	/*
+	 * Don't allow programming if the action is a queue greater than
+	 * the number of online Tx queues.
+	 */
+	if ((fs.action >= adapter->num_tx_queues) ||
+	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+		return -EINVAL;
+
+	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+	input_masks.vlan_id_mask = fs.vlan_tag_mask;
+	/* only use the lowest 2 bytes for flex bytes */
+	input_masks.data_mask = (fs.data_mask & 0xffff);
+
+	switch (fs.flow_type) {
+	case TCP_V4_FLOW:
+		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		break;
+	case UDP_V4_FLOW:
+		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		break;
+	case SCTP_V4_FLOW:
+		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		break;
+	default:
+		return -1;
+	}
+
+	/* Mask bits from the inputs based on user-supplied mask */
+	ixgbe_atr_set_src_ipv4_82599(&input_struct,
+	            (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+	            (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+	/* 82599 expects these to be byte-swapped for perfect filtering */
+	ixgbe_atr_set_src_port_82599(&input_struct,
+	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+	ixgbe_atr_set_dst_port_82599(&input_struct,
+	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+	/* VLAN and Flex bytes are either completely masked or not */
+	if (!fs.vlan_tag_mask)
+		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+	if (!input_masks.data_mask)
+		/* make sure we only use the first 2 bytes of user data */
+		ixgbe_atr_set_flex_byte_82599(&input_struct,
+		                              (fs.data & 0xffff));
+
+	/* determine if we need to drop or route the packet */
+	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+		target_queue = MAX_RX_QUEUES - 1;
+	else
+		target_queue = fs.action;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+	                                    &input_masks, 0, target_queue);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
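With an ethtool of this era the new hook is reached by first turning the feature flag on and then installing a filter; assuming the -U/--config-ntuple syntax of the time, something like `ethtool -K eth0 ntuple on` followed by `ethtool -U eth0 flow-type tcp4 src-ip 192.168.0.2 action 2` should land in ixgbe_set_rx_ntuple(), and a drop action (ETHTOOL_RXNTUPLE_ACTION_DROP, value -1) is mapped by the driver to ring MAX_RX_QUEUES - 1, the FDIR drop queue programmed earlier.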
@@ -2188,6 +2306,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.set_coalesce = ixgbe_set_coalesce,
 	.get_flags = ethtool_op_get_flags,
 	.set_flags = ixgbe_set_flags,
+	.set_rx_ntuple = ixgbe_set_rx_ntuple,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index e9a20c88c15..4123dec0dfb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
 			fcoe_i = f->mask + i % f->indices;
 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
-			fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	} else {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
-		fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
 		                IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 951b73cf5ca..45e3532b166 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,12 +45,13 @@
 #include "ixgbe.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  *   Class, Class Mask, private data (not used) }
  */
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
 	 board_82598 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
 };
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+                 "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
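max_vfs only records a request at module load; the enable side lives in the new ixgbe_sriov.c (not part of this excerpt) and ultimately goes through the PCI core. A hedged sketch of the usual pattern, with a made-up function name:

    #ifdef CONFIG_PCI_IOV
    static int example_enable_vfs(struct pci_dev *pdev, unsigned int want)
    {
            /* 64 pools on 82599 and the PF keeps at least one,
             * so at most 63 VFs can be handed out */
            if (want > 63)
                    want = 63;
            return pci_enable_sriov(pdev, want);  /* PCI-core entry point */
    }
    #endif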
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gcr;
+	u32 gpie;
+	u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+
+	/* turn off device IOV mode */
+	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/* set default pool back to 0 */
+	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+	if (adapter->vfinfo)
+		kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 {
 	u32 ctrl_ext;
@@ -451,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | |||
451 | { | 494 | { |
452 | u32 rxctrl; | 495 | u32 rxctrl; |
453 | int cpu = get_cpu(); | 496 | int cpu = get_cpu(); |
454 | int q = rx_ring - adapter->rx_ring; | 497 | int q = rx_ring->reg_idx; |
455 | 498 | ||
456 | if (rx_ring->cpu != cpu) { | 499 | if (rx_ring->cpu != cpu) { |
457 | rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); | 500 | rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); |
@@ -479,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | |||
479 | { | 522 | { |
480 | u32 txctrl; | 523 | u32 txctrl; |
481 | int cpu = get_cpu(); | 524 | int cpu = get_cpu(); |
482 | int q = tx_ring - adapter->tx_ring; | 525 | int q = tx_ring->reg_idx; |
483 | struct ixgbe_hw *hw = &adapter->hw; | 526 | struct ixgbe_hw *hw = &adapter->hw; |
484 | 527 | ||
485 | if (tx_ring->cpu != cpu) { | 528 | if (tx_ring->cpu != cpu) { |
@@ -513,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | |||
513 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); | 556 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); |
514 | 557 | ||
515 | for (i = 0; i < adapter->num_tx_queues; i++) { | 558 | for (i = 0; i < adapter->num_tx_queues; i++) { |
516 | adapter->tx_ring[i].cpu = -1; | 559 | adapter->tx_ring[i]->cpu = -1; |
517 | ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]); | 560 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); |
518 | } | 561 | } |
519 | for (i = 0; i < adapter->num_rx_queues; i++) { | 562 | for (i = 0; i < adapter->num_rx_queues; i++) { |
520 | adapter->rx_ring[i].cpu = -1; | 563 | adapter->rx_ring[i]->cpu = -1; |
521 | ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]); | 564 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); |
522 | } | 565 | } |
523 | } | 566 | } |
524 | 567 | ||
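A change that recurs through the rest of this patch: adapter->tx_ring and adapter->rx_ring become arrays of pointers rather than contiguous arrays of structs, so every "&adapter->tx_ring[i]" turns into "adapter->tx_ring[i]" and member access switches from "." to "->". The declarations live in ixgbe.h and are only paraphrased here:

/* before: one kcalloc'd block, tx_ring[i] is a struct */
struct ixgbe_ring *tx_ring;
/* after: one pointer per ring, each allocated separately
 * (which is what makes the per-NUMA-node allocation at the
 * end of this section possible) */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES];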
@@ -775,6 +818,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, | |||
775 | return skb; | 818 | return skb; |
776 | } | 819 | } |
777 | 820 | ||
821 | struct ixgbe_rsc_cb { | ||
822 | dma_addr_t dma; | ||
823 | }; | ||
824 | |||
825 | #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) | ||
826 | |||
778 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | 827 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
779 | struct ixgbe_ring *rx_ring, | 828 | struct ixgbe_ring *rx_ring, |
780 | int *work_done, int work_to_do) | 829 | int *work_done, int work_to_do) |
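The ixgbe_rsc_cb struct above overlays skb->cb, the 48-byte scratch area each sk_buff reserves for its current owner; the cast macro is the standard way to stash per-packet driver state without enlarging the skb. A compile-time guard like the following is a common companion (a suggestion, not part of this patch; it would sit inside a function such as ixgbe_clean_rx_irq()):

/* fail the build if the private struct ever outgrows skb->cb */
BUILD_BUG_ON(sizeof(struct ixgbe_rsc_cb) >
	     FIELD_SIZEOF(struct sk_buff, cb));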
@@ -806,6 +855,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
806 | break; | 855 | break; |
807 | (*work_done)++; | 856 | (*work_done)++; |
808 | 857 | ||
858 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | ||
809 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 859 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { |
810 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); | 860 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); |
811 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | 861 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
@@ -823,9 +873,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
823 | rx_buffer_info->skb = NULL; | 873 | rx_buffer_info->skb = NULL; |
824 | 874 | ||
825 | if (rx_buffer_info->dma) { | 875 | if (rx_buffer_info->dma) { |
826 | pci_unmap_single(pdev, rx_buffer_info->dma, | 876 | if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
827 | rx_ring->rx_buf_len, | 877 | (!(staterr & IXGBE_RXD_STAT_EOP)) && |
828 | PCI_DMA_FROMDEVICE); | 878 | (!(skb->prev))) |
879 | /* | ||
880 | * When HWRSC is enabled, delay unmapping | ||
881 | * of the first packet. It carries the | ||
882 | * header information; the HW may still | ||
883 | * access the header after the writeback. | ||
884 | * Only unmap it when EOP is reached. | ||
885 | */ | ||
886 | IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; | ||
887 | else | ||
888 | pci_unmap_single(pdev, rx_buffer_info->dma, | ||
889 | rx_ring->rx_buf_len, | ||
890 | PCI_DMA_FROMDEVICE); | ||
829 | rx_buffer_info->dma = 0; | 891 | rx_buffer_info->dma = 0; |
830 | skb_put(skb, len); | 892 | skb_put(skb, len); |
831 | } | 893 | } |
@@ -873,6 +935,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
873 | if (skb->prev) | 935 | if (skb->prev) |
874 | skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); | 936 | skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); |
875 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | 937 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { |
938 | if (IXGBE_RSC_CB(skb)->dma) | ||
939 | pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, | ||
940 | rx_ring->rx_buf_len, | ||
941 | PCI_DMA_FROMDEVICE); | ||
876 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) | 942 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) |
877 | rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; | 943 | rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; |
878 | else | 944 | else |
@@ -989,7 +1055,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
989 | adapter->num_rx_queues); | 1055 | adapter->num_rx_queues); |
990 | 1056 | ||
991 | for (i = 0; i < q_vector->rxr_count; i++) { | 1057 | for (i = 0; i < q_vector->rxr_count; i++) { |
992 | j = adapter->rx_ring[r_idx].reg_idx; | 1058 | j = adapter->rx_ring[r_idx]->reg_idx; |
993 | ixgbe_set_ivar(adapter, 0, j, v_idx); | 1059 | ixgbe_set_ivar(adapter, 0, j, v_idx); |
994 | r_idx = find_next_bit(q_vector->rxr_idx, | 1060 | r_idx = find_next_bit(q_vector->rxr_idx, |
995 | adapter->num_rx_queues, | 1061 | adapter->num_rx_queues, |
@@ -999,7 +1065,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
999 | adapter->num_tx_queues); | 1065 | adapter->num_tx_queues); |
1000 | 1066 | ||
1001 | for (i = 0; i < q_vector->txr_count; i++) { | 1067 | for (i = 0; i < q_vector->txr_count; i++) { |
1002 | j = adapter->tx_ring[r_idx].reg_idx; | 1068 | j = adapter->tx_ring[r_idx]->reg_idx; |
1003 | ixgbe_set_ivar(adapter, 1, j, v_idx); | 1069 | ixgbe_set_ivar(adapter, 1, j, v_idx); |
1004 | r_idx = find_next_bit(q_vector->txr_idx, | 1070 | r_idx = find_next_bit(q_vector->txr_idx, |
1005 | adapter->num_tx_queues, | 1071 | adapter->num_tx_queues, |
@@ -1025,7 +1091,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1025 | 1091 | ||
1026 | /* set up to autoclear timer, and the vectors */ | 1092 | /* set up to autoclear timer, and the vectors */ |
1027 | mask = IXGBE_EIMS_ENABLE_MASK; | 1093 | mask = IXGBE_EIMS_ENABLE_MASK; |
1028 | mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); | 1094 | if (adapter->num_vfs) |
1095 | mask &= ~(IXGBE_EIMS_OTHER | | ||
1096 | IXGBE_EIMS_MAILBOX | | ||
1097 | IXGBE_EIMS_LSC); | ||
1098 | else | ||
1099 | mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); | ||
1029 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); | 1100 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); |
1030 | } | 1101 | } |
1031 | 1102 | ||
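EIAC selects which MSI-X causes auto-clear when EICR is read: the per-queue causes auto-clear, while OTHER, LSC and (with VFs present) MAILBOX stay latched so ixgbe_msix_lsc() can inspect and service them explicitly. The branch above is equivalent to this single expression:

mask = IXGBE_EIMS_ENABLE_MASK &
       ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC |
	 (adapter->num_vfs ? IXGBE_EIMS_MAILBOX : 0));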
@@ -1134,7 +1205,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1134 | 1205 | ||
1135 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1206 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1136 | for (i = 0; i < q_vector->txr_count; i++) { | 1207 | for (i = 0; i < q_vector->txr_count; i++) { |
1137 | tx_ring = &(adapter->tx_ring[r_idx]); | 1208 | tx_ring = adapter->tx_ring[r_idx]; |
1138 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1209 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1139 | q_vector->tx_itr, | 1210 | q_vector->tx_itr, |
1140 | tx_ring->total_packets, | 1211 | tx_ring->total_packets, |
@@ -1149,7 +1220,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1149 | 1220 | ||
1150 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1221 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1151 | for (i = 0; i < q_vector->rxr_count; i++) { | 1222 | for (i = 0; i < q_vector->rxr_count; i++) { |
1152 | rx_ring = &(adapter->rx_ring[r_idx]); | 1223 | rx_ring = adapter->rx_ring[r_idx]; |
1153 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1224 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1154 | q_vector->rx_itr, | 1225 | q_vector->rx_itr, |
1155 | rx_ring->total_packets, | 1226 | rx_ring->total_packets, |
@@ -1254,6 +1325,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1254 | if (eicr & IXGBE_EICR_LSC) | 1325 | if (eicr & IXGBE_EICR_LSC) |
1255 | ixgbe_check_lsc(adapter); | 1326 | ixgbe_check_lsc(adapter); |
1256 | 1327 | ||
1328 | if (eicr & IXGBE_EICR_MAILBOX) | ||
1329 | ixgbe_msg_task(adapter); | ||
1330 | |||
1257 | if (hw->mac.type == ixgbe_mac_82598EB) | 1331 | if (hw->mac.type == ixgbe_mac_82598EB) |
1258 | ixgbe_check_fan_failure(adapter, eicr); | 1332 | ixgbe_check_fan_failure(adapter, eicr); |
1259 | 1333 | ||
@@ -1268,7 +1342,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1268 | netif_tx_stop_all_queues(netdev); | 1342 | netif_tx_stop_all_queues(netdev); |
1269 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1343 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1270 | struct ixgbe_ring *tx_ring = | 1344 | struct ixgbe_ring *tx_ring = |
1271 | &adapter->tx_ring[i]; | 1345 | adapter->tx_ring[i]; |
1272 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, | 1346 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, |
1273 | &tx_ring->reinit_state)) | 1347 | &tx_ring->reinit_state)) |
1274 | schedule_work(&adapter->fdir_reinit_task); | 1348 | schedule_work(&adapter->fdir_reinit_task); |
@@ -1327,7 +1401,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) | |||
1327 | 1401 | ||
1328 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1402 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1329 | for (i = 0; i < q_vector->txr_count; i++) { | 1403 | for (i = 0; i < q_vector->txr_count; i++) { |
1330 | tx_ring = &(adapter->tx_ring[r_idx]); | 1404 | tx_ring = adapter->tx_ring[r_idx]; |
1331 | tx_ring->total_bytes = 0; | 1405 | tx_ring->total_bytes = 0; |
1332 | tx_ring->total_packets = 0; | 1406 | tx_ring->total_packets = 0; |
1333 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 1407 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
@@ -1355,7 +1429,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
1355 | 1429 | ||
1356 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1430 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1357 | for (i = 0; i < q_vector->rxr_count; i++) { | 1431 | for (i = 0; i < q_vector->rxr_count; i++) { |
1358 | rx_ring = &(adapter->rx_ring[r_idx]); | 1432 | rx_ring = adapter->rx_ring[r_idx]; |
1359 | rx_ring->total_bytes = 0; | 1433 | rx_ring->total_bytes = 0; |
1360 | rx_ring->total_packets = 0; | 1434 | rx_ring->total_packets = 0; |
1361 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 1435 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
@@ -1385,7 +1459,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |||
1385 | 1459 | ||
1386 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1460 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1387 | for (i = 0; i < q_vector->txr_count; i++) { | 1461 | for (i = 0; i < q_vector->txr_count; i++) { |
1388 | ring = &(adapter->tx_ring[r_idx]); | 1462 | ring = adapter->tx_ring[r_idx]; |
1389 | ring->total_bytes = 0; | 1463 | ring->total_bytes = 0; |
1390 | ring->total_packets = 0; | 1464 | ring->total_packets = 0; |
1391 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 1465 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
@@ -1394,7 +1468,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | |||
1394 | 1468 | ||
1395 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1469 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1396 | for (i = 0; i < q_vector->rxr_count; i++) { | 1470 | for (i = 0; i < q_vector->rxr_count; i++) { |
1397 | ring = &(adapter->rx_ring[r_idx]); | 1471 | ring = adapter->rx_ring[r_idx]; |
1398 | ring->total_bytes = 0; | 1472 | ring->total_bytes = 0; |
1399 | ring->total_packets = 0; | 1473 | ring->total_packets = 0; |
1400 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 1474 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
@@ -1425,7 +1499,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1425 | long r_idx; | 1499 | long r_idx; |
1426 | 1500 | ||
1427 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1501 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1428 | rx_ring = &(adapter->rx_ring[r_idx]); | 1502 | rx_ring = adapter->rx_ring[r_idx]; |
1429 | #ifdef CONFIG_IXGBE_DCA | 1503 | #ifdef CONFIG_IXGBE_DCA |
1430 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 1504 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1431 | ixgbe_update_rx_dca(adapter, rx_ring); | 1505 | ixgbe_update_rx_dca(adapter, rx_ring); |
@@ -1466,7 +1540,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1466 | 1540 | ||
1467 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1541 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1468 | for (i = 0; i < q_vector->txr_count; i++) { | 1542 | for (i = 0; i < q_vector->txr_count; i++) { |
1469 | ring = &(adapter->tx_ring[r_idx]); | 1543 | ring = adapter->tx_ring[r_idx]; |
1470 | #ifdef CONFIG_IXGBE_DCA | 1544 | #ifdef CONFIG_IXGBE_DCA |
1471 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 1545 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1472 | ixgbe_update_tx_dca(adapter, ring); | 1546 | ixgbe_update_tx_dca(adapter, ring); |
@@ -1482,7 +1556,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1482 | budget = max(budget, 1); | 1556 | budget = max(budget, 1); |
1483 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1557 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1484 | for (i = 0; i < q_vector->rxr_count; i++) { | 1558 | for (i = 0; i < q_vector->rxr_count; i++) { |
1485 | ring = &(adapter->rx_ring[r_idx]); | 1559 | ring = adapter->rx_ring[r_idx]; |
1486 | #ifdef CONFIG_IXGBE_DCA | 1560 | #ifdef CONFIG_IXGBE_DCA |
1487 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 1561 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1488 | ixgbe_update_rx_dca(adapter, ring); | 1562 | ixgbe_update_rx_dca(adapter, ring); |
@@ -1493,7 +1567,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1493 | } | 1567 | } |
1494 | 1568 | ||
1495 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1569 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1496 | ring = &(adapter->rx_ring[r_idx]); | 1570 | ring = adapter->rx_ring[r_idx]; |
1497 | /* If all Rx work done, exit the polling mode */ | 1571 | /* If all Rx work done, exit the polling mode */ |
1498 | if (work_done < budget) { | 1572 | if (work_done < budget) { |
1499 | napi_complete(napi); | 1573 | napi_complete(napi); |
@@ -1526,7 +1600,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) | |||
1526 | long r_idx; | 1600 | long r_idx; |
1527 | 1601 | ||
1528 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1602 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1529 | tx_ring = &(adapter->tx_ring[r_idx]); | 1603 | tx_ring = adapter->tx_ring[r_idx]; |
1530 | #ifdef CONFIG_IXGBE_DCA | 1604 | #ifdef CONFIG_IXGBE_DCA |
1531 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 1605 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1532 | ixgbe_update_tx_dca(adapter, tx_ring); | 1606 | ixgbe_update_tx_dca(adapter, tx_ring); |
@@ -1711,8 +1785,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | |||
1711 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; | 1785 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
1712 | u8 current_itr; | 1786 | u8 current_itr; |
1713 | u32 new_itr = q_vector->eitr; | 1787 | u32 new_itr = q_vector->eitr; |
1714 | struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; | 1788 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; |
1715 | struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; | 1789 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; |
1716 | 1790 | ||
1717 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, | 1791 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, |
1718 | q_vector->tx_itr, | 1792 | q_vector->tx_itr, |
@@ -1768,6 +1842,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
1768 | mask |= IXGBE_EIMS_ECC; | 1842 | mask |= IXGBE_EIMS_ECC; |
1769 | mask |= IXGBE_EIMS_GPI_SDP1; | 1843 | mask |= IXGBE_EIMS_GPI_SDP1; |
1770 | mask |= IXGBE_EIMS_GPI_SDP2; | 1844 | mask |= IXGBE_EIMS_GPI_SDP2; |
1845 | if (adapter->num_vfs) | ||
1846 | mask |= IXGBE_EIMS_MAILBOX; | ||
1771 | } | 1847 | } |
1772 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 1848 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
1773 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | 1849 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
@@ -1776,6 +1852,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
1776 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1852 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
1777 | ixgbe_irq_enable_queues(adapter, ~0); | 1853 | ixgbe_irq_enable_queues(adapter, ~0); |
1778 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1854 | IXGBE_WRITE_FLUSH(&adapter->hw); |
1855 | |||
1856 | if (adapter->num_vfs > 32) { | ||
1857 | u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; | ||
1858 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); | ||
1859 | } | ||
1779 | } | 1860 | } |
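Reading the code, EITRSEL appears to hand the interrupt throttle registers above index 31 over to VF vectors once more than 32 VFs exist, one low-order bit per excess VF (the datasheet is authoritative on the register's semantics). Worked example of the bit math:

/* num_vfs = 40: the 8 VFs beyond the first 32 take EITR[32..39] */
u32 eitrsel = (1 << (40 - 32)) - 1;	/* (1 << 8) - 1 = 0x00FF */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);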
1780 | 1861 | ||
1781 | /** | 1862 | /** |
@@ -1817,10 +1898,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
1817 | ixgbe_check_fan_failure(adapter, eicr); | 1898 | ixgbe_check_fan_failure(adapter, eicr); |
1818 | 1899 | ||
1819 | if (napi_schedule_prep(&(q_vector->napi))) { | 1900 | if (napi_schedule_prep(&(q_vector->napi))) { |
1820 | adapter->tx_ring[0].total_packets = 0; | 1901 | adapter->tx_ring[0]->total_packets = 0; |
1821 | adapter->tx_ring[0].total_bytes = 0; | 1902 | adapter->tx_ring[0]->total_bytes = 0; |
1822 | adapter->rx_ring[0].total_packets = 0; | 1903 | adapter->rx_ring[0]->total_packets = 0; |
1823 | adapter->rx_ring[0].total_bytes = 0; | 1904 | adapter->rx_ring[0]->total_bytes = 0; |
1824 | /* would disable interrupts here but EIAM disabled it */ | 1905 | /* would disable interrupts here but EIAM disabled it */ |
1825 | __napi_schedule(&(q_vector->napi)); | 1906 | __napi_schedule(&(q_vector->napi)); |
1826 | } | 1907 | } |
@@ -1905,6 +1986,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | |||
1905 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); | 1986 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); |
1906 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); | 1987 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); |
1907 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); | 1988 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
1989 | if (adapter->num_vfs > 32) | ||
1990 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); | ||
1908 | } | 1991 | } |
1909 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1992 | IXGBE_WRITE_FLUSH(&adapter->hw); |
1910 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 1993 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
@@ -1950,7 +2033,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
1950 | 2033 | ||
1951 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 2034 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
1952 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2035 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1953 | struct ixgbe_ring *ring = &adapter->tx_ring[i]; | 2036 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
1954 | j = ring->reg_idx; | 2037 | j = ring->reg_idx; |
1955 | tdba = ring->dma; | 2038 | tdba = ring->dma; |
1956 | tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); | 2039 | tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); |
@@ -1960,8 +2043,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
1960 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); | 2043 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); |
1961 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); | 2044 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); |
1962 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); | 2045 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); |
1963 | adapter->tx_ring[i].head = IXGBE_TDH(j); | 2046 | adapter->tx_ring[i]->head = IXGBE_TDH(j); |
1964 | adapter->tx_ring[i].tail = IXGBE_TDT(j); | 2047 | adapter->tx_ring[i]->tail = IXGBE_TDT(j); |
1965 | /* | 2048 | /* |
1966 | * Disable Tx Head Writeback RO bit, since this hoses | 2049 | * Disable Tx Head Writeback RO bit, since this hoses |
1967 | * bookkeeping if things aren't delivered in order. | 2050 | * bookkeeping if things aren't delivered in order. |
@@ -1989,18 +2072,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
1989 | 2072 | ||
1990 | if (hw->mac.type == ixgbe_mac_82599EB) { | 2073 | if (hw->mac.type == ixgbe_mac_82599EB) { |
1991 | u32 rttdcs; | 2074 | u32 rttdcs; |
2075 | u32 mask; | ||
1992 | 2076 | ||
1993 | /* disable the arbiter while setting MTQC */ | 2077 | /* disable the arbiter while setting MTQC */ |
1994 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); | 2078 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); |
1995 | rttdcs |= IXGBE_RTTDCS_ARBDIS; | 2079 | rttdcs |= IXGBE_RTTDCS_ARBDIS; |
1996 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | 2080 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
1997 | 2081 | ||
1998 | /* We enable 8 traffic classes, DCB only */ | 2082 | /* set transmit pool layout */ |
1999 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 2083 | mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); |
2000 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | | 2084 | switch (adapter->flags & mask) { |
2001 | IXGBE_MTQC_8TC_8TQ)); | 2085 | |
2002 | else | 2086 | case (IXGBE_FLAG_SRIOV_ENABLED): |
2087 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | ||
2088 | (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); | ||
2089 | break; | ||
2090 | |||
2091 | case (IXGBE_FLAG_DCB_ENABLED): | ||
2092 | /* We enable 8 traffic classes, DCB only */ | ||
2093 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | ||
2094 | (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); | ||
2095 | break; | ||
2096 | |||
2097 | default: | ||
2003 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); | 2098 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); |
2099 | break; | ||
2100 | } | ||
2004 | 2101 | ||
2005 | /* re-enable the arbiter */ | 2102 | /* re-enable the arbiter */ |
2006 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; | 2103 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; |
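The three MTQC layouts selected above, summarized (the DCB per-TC queue split is spelled out by the FCoE comment later in this section):

/* IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF    - 64 pools for VT/SR-IOV
 * IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ - 8 traffic classes (DCB)
 * IXGBE_MTQC_64Q_1PB                     - flat 64 queues (default)
 * Note the cases are exclusive: with both the SRIOV and DCB flags
 * set, the masked value matches neither case and falls through to
 * the default flat layout. */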
@@ -2059,12 +2156,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2059 | #ifdef CONFIG_IXGBE_DCB | 2156 | #ifdef CONFIG_IXGBE_DCB |
2060 | | IXGBE_FLAG_DCB_ENABLED | 2157 | | IXGBE_FLAG_DCB_ENABLED |
2061 | #endif | 2158 | #endif |
2159 | | IXGBE_FLAG_SRIOV_ENABLED | ||
2062 | ); | 2160 | ); |
2063 | 2161 | ||
2064 | switch (mask) { | 2162 | switch (mask) { |
2065 | case (IXGBE_FLAG_RSS_ENABLED): | 2163 | case (IXGBE_FLAG_RSS_ENABLED): |
2066 | mrqc = IXGBE_MRQC_RSSEN; | 2164 | mrqc = IXGBE_MRQC_RSSEN; |
2067 | break; | 2165 | break; |
2166 | case (IXGBE_FLAG_SRIOV_ENABLED): | ||
2167 | mrqc = IXGBE_MRQC_VMDQEN; | ||
2168 | break; | ||
2068 | #ifdef CONFIG_IXGBE_DCB | 2169 | #ifdef CONFIG_IXGBE_DCB |
2069 | case (IXGBE_FLAG_DCB_ENABLED): | 2170 | case (IXGBE_FLAG_DCB_ENABLED): |
2070 | mrqc = IXGBE_MRQC_RT8TCEN; | 2171 | mrqc = IXGBE_MRQC_RT8TCEN; |
@@ -2090,7 +2191,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) | |||
2090 | u32 rscctrl; | 2191 | u32 rscctrl; |
2091 | int rx_buf_len; | 2192 | int rx_buf_len; |
2092 | 2193 | ||
2093 | rx_ring = &adapter->rx_ring[index]; | 2194 | rx_ring = adapter->rx_ring[index]; |
2094 | j = rx_ring->reg_idx; | 2195 | j = rx_ring->reg_idx; |
2095 | rx_buf_len = rx_ring->rx_buf_len; | 2196 | rx_buf_len = rx_ring->rx_buf_len; |
2096 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); | 2197 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); |
@@ -2145,7 +2246,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2145 | int rx_buf_len; | 2246 | int rx_buf_len; |
2146 | 2247 | ||
2147 | /* Decide whether to use packet split mode or not */ | 2248 | /* Decide whether to use packet split mode or not */ |
2148 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 2249 | /* Do not use packet split if we're in SR-IOV mode */ |
2250 | if (!adapter->num_vfs) | ||
2251 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
2149 | 2252 | ||
2150 | /* Set the RX buffer length according to the mode */ | 2253 | /* Set the RX buffer length according to the mode */ |
2151 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 2254 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
@@ -2157,7 +2260,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2157 | IXGBE_PSRTYPE_IPV4HDR | | 2260 | IXGBE_PSRTYPE_IPV4HDR | |
2158 | IXGBE_PSRTYPE_IPV6HDR | | 2261 | IXGBE_PSRTYPE_IPV6HDR | |
2159 | IXGBE_PSRTYPE_L2HDR; | 2262 | IXGBE_PSRTYPE_L2HDR; |
2160 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); | 2263 | IXGBE_WRITE_REG(hw, |
2264 | IXGBE_PSRTYPE(adapter->num_vfs), | ||
2265 | psrtype); | ||
2161 | } | 2266 | } |
2162 | } else { | 2267 | } else { |
2163 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && | 2268 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && |
@@ -2184,7 +2289,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2184 | #endif | 2289 | #endif |
2185 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); | 2290 | IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); |
2186 | 2291 | ||
2187 | rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); | 2292 | rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc); |
2188 | /* disable receives while setting up the descriptors */ | 2293 | /* disable receives while setting up the descriptors */ |
2189 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 2294 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
2190 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | 2295 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
@@ -2194,7 +2299,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2194 | * the Base and Length of the Rx Descriptor Ring | 2299 | * the Base and Length of the Rx Descriptor Ring |
2195 | */ | 2300 | */ |
2196 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2301 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2197 | rx_ring = &adapter->rx_ring[i]; | 2302 | rx_ring = adapter->rx_ring[i]; |
2198 | rdba = rx_ring->dma; | 2303 | rdba = rx_ring->dma; |
2199 | j = rx_ring->reg_idx; | 2304 | j = rx_ring->reg_idx; |
2200 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); | 2305 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); |
@@ -2243,6 +2348,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2243 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | 2348 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
2244 | } | 2349 | } |
2245 | 2350 | ||
2351 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
2352 | u32 vt_reg_bits; | ||
2353 | u32 reg_offset, vf_shift; | ||
2354 | u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); | ||
2355 | vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | ||
2356 | | IXGBE_VT_CTL_REPLEN; | ||
2357 | vt_reg_bits |= (adapter->num_vfs << | ||
2358 | IXGBE_VT_CTL_POOL_SHIFT); | ||
2359 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); | ||
2360 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0); | ||
2361 | |||
2362 | vf_shift = adapter->num_vfs % 32; | ||
2363 | reg_offset = adapter->num_vfs / 32; | ||
2364 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); | ||
2365 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); | ||
2366 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); | ||
2367 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); | ||
2368 | /* Enable only the PF's pool for Tx/Rx */ | ||
2369 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); | ||
2370 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); | ||
2371 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); | ||
2372 | ixgbe_set_vmolr(hw, adapter->num_vfs); | ||
2373 | } | ||
2374 | |||
2246 | /* Program MRQC for the distribution of queues */ | 2375 | /* Program MRQC for the distribution of queues */ |
2247 | mrqc = ixgbe_setup_mrqc(adapter); | 2376 | mrqc = ixgbe_setup_mrqc(adapter); |
2248 | 2377 | ||
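VFRE and VFTE are per-pool Rx/Tx enable bitmaps, 64 bits wide and split across two 32-bit registers, which the div/mod arithmetic above indexes. The PF's pool sits directly after the VF pools at index num_vfs; the VF bits are presumably set later as each VF comes up through the mailbox code in ixgbe_sriov.c. Worked example:

/* num_vfs = 8: the PF owns pool 8 */
vf_shift   = 8 % 32;			/* bit 8      */
reg_offset = 8 / 32;			/* register 0 */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 1 << 8);	/* Rx: PF pool only */
IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 1 << 8);	/* Tx: PF pool only */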
@@ -2274,6 +2403,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2274 | } | 2403 | } |
2275 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | 2404 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
2276 | 2405 | ||
2406 | if (adapter->num_vfs) { | ||
2407 | u32 reg; | ||
2408 | |||
2409 | /* Map PF MAC address in RAR Entry 0 to first pool | ||
2410 | * following VFs */ | ||
2411 | hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); | ||
2412 | |||
2413 | /* Set up VF register offsets for selected VT Mode, i.e. | ||
2414 | * 64 VFs for SR-IOV */ | ||
2415 | reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); | ||
2416 | reg |= IXGBE_GCR_EXT_SRIOV; | ||
2417 | IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg); | ||
2418 | } | ||
2419 | |||
2277 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | 2420 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); |
2278 | 2421 | ||
2279 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || | 2422 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || |
@@ -2312,15 +2455,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
2312 | { | 2455 | { |
2313 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 2456 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
2314 | struct ixgbe_hw *hw = &adapter->hw; | 2457 | struct ixgbe_hw *hw = &adapter->hw; |
2458 | int pool_ndx = adapter->num_vfs; | ||
2315 | 2459 | ||
2316 | /* add VID to filter table */ | 2460 | /* add VID to filter table */ |
2317 | hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); | 2461 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); |
2318 | } | 2462 | } |
2319 | 2463 | ||
2320 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | 2464 | static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
2321 | { | 2465 | { |
2322 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 2466 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
2323 | struct ixgbe_hw *hw = &adapter->hw; | 2467 | struct ixgbe_hw *hw = &adapter->hw; |
2468 | int pool_ndx = adapter->num_vfs; | ||
2324 | 2469 | ||
2325 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2470 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
2326 | ixgbe_irq_disable(adapter); | 2471 | ixgbe_irq_disable(adapter); |
@@ -2331,7 +2476,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2331 | ixgbe_irq_enable(adapter); | 2476 | ixgbe_irq_enable(adapter); |
2332 | 2477 | ||
2333 | /* remove VID from filter table */ | 2478 | /* remove VID from filter table */ |
2334 | hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); | 2479 | hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); |
2335 | } | 2480 | } |
2336 | 2481 | ||
2337 | static void ixgbe_vlan_rx_register(struct net_device *netdev, | 2482 | static void ixgbe_vlan_rx_register(struct net_device *netdev, |
@@ -2361,7 +2506,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, | |||
2361 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 2506 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
2362 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2507 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2363 | u32 ctrl; | 2508 | u32 ctrl; |
2364 | j = adapter->rx_ring[i].reg_idx; | 2509 | j = adapter->rx_ring[i]->reg_idx; |
2365 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); | 2510 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); |
2366 | ctrl |= IXGBE_RXDCTL_VME; | 2511 | ctrl |= IXGBE_RXDCTL_VME; |
2367 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); | 2512 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); |
@@ -2414,7 +2559,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) | |||
2414 | * responsible for configuring the hardware for proper unicast, multicast and | 2559 | * responsible for configuring the hardware for proper unicast, multicast and |
2415 | * promiscuous mode. | 2560 | * promiscuous mode. |
2416 | **/ | 2561 | **/ |
2417 | static void ixgbe_set_rx_mode(struct net_device *netdev) | 2562 | void ixgbe_set_rx_mode(struct net_device *netdev) |
2418 | { | 2563 | { |
2419 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 2564 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
2420 | struct ixgbe_hw *hw = &adapter->hw; | 2565 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -2446,14 +2591,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev) | |||
2446 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 2591 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
2447 | 2592 | ||
2448 | /* reprogram secondary unicast list */ | 2593 | /* reprogram secondary unicast list */ |
2449 | hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); | 2594 | hw->mac.ops.update_uc_addr_list(hw, netdev); |
2450 | 2595 | ||
2451 | /* reprogram multicast list */ | 2596 | /* reprogram multicast list */ |
2452 | addr_count = netdev->mc_count; | 2597 | addr_count = netdev_mc_count(netdev); |
2453 | if (addr_count) | 2598 | if (addr_count) |
2454 | addr_list = netdev->mc_list->dmi_addr; | 2599 | addr_list = netdev->mc_list->dmi_addr; |
2455 | hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, | 2600 | hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, |
2456 | ixgbe_addr_list_itr); | 2601 | ixgbe_addr_list_itr); |
2602 | if (adapter->num_vfs) | ||
2603 | ixgbe_restore_vf_multicasts(adapter); | ||
2457 | } | 2604 | } |
2458 | 2605 | ||
2459 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | 2606 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) |
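Note the transitional API mix in the hunk above: the count now comes from netdev_mc_count(), while the addresses are still walked via netdev->mc_list and the dmi_addr iterator. Later kernels replace both with the generic iterator; a hedged sketch of that shape, where ixgbe_add_mc_filter() is a hypothetical per-address helper:

struct netdev_hw_addr *ha;

netdev_for_each_mc_addr(ha, netdev)
	ixgbe_add_mc_filter(hw, ha->addr);	/* hypothetical helper */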
@@ -2522,7 +2669,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
2522 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); | 2669 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); |
2523 | 2670 | ||
2524 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2671 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2525 | j = adapter->tx_ring[i].reg_idx; | 2672 | j = adapter->tx_ring[i]->reg_idx; |
2526 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 2673 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
2527 | /* PThresh workaround for Tx hang with DFP enabled. */ | 2674 | /* PThresh workaround for Tx hang with DFP enabled. */ |
2528 | txdctl |= 32; | 2675 | txdctl |= 32; |
@@ -2539,7 +2686,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
2539 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | 2686 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; |
2540 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 2687 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
2541 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2688 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2542 | j = adapter->rx_ring[i].reg_idx; | 2689 | j = adapter->rx_ring[i]->reg_idx; |
2543 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 2690 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
2544 | vlnctrl |= IXGBE_RXDCTL_VME; | 2691 | vlnctrl |= IXGBE_RXDCTL_VME; |
2545 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); | 2692 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); |
@@ -2579,7 +2726,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
2579 | #endif /* IXGBE_FCOE */ | 2726 | #endif /* IXGBE_FCOE */ |
2580 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | 2727 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { |
2581 | for (i = 0; i < adapter->num_tx_queues; i++) | 2728 | for (i = 0; i < adapter->num_tx_queues; i++) |
2582 | adapter->tx_ring[i].atr_sample_rate = | 2729 | adapter->tx_ring[i]->atr_sample_rate = |
2583 | adapter->atr_sample_rate; | 2730 | adapter->atr_sample_rate; |
2584 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); | 2731 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); |
2585 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | 2732 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { |
@@ -2589,8 +2736,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
2589 | ixgbe_configure_tx(adapter); | 2736 | ixgbe_configure_tx(adapter); |
2590 | ixgbe_configure_rx(adapter); | 2737 | ixgbe_configure_rx(adapter); |
2591 | for (i = 0; i < adapter->num_rx_queues; i++) | 2738 | for (i = 0; i < adapter->num_rx_queues; i++) |
2592 | ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], | 2739 | ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i], |
2593 | (adapter->rx_ring[i].count - 1)); | 2740 | (adapter->rx_ring[i]->count - 1)); |
2594 | } | 2741 | } |
2595 | 2742 | ||
2596 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) | 2743 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) |
@@ -2673,7 +2820,7 @@ link_cfg_out: | |||
2673 | static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | 2820 | static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, |
2674 | int rxr) | 2821 | int rxr) |
2675 | { | 2822 | { |
2676 | int j = adapter->rx_ring[rxr].reg_idx; | 2823 | int j = adapter->rx_ring[rxr]->reg_idx; |
2677 | int k; | 2824 | int k; |
2678 | 2825 | ||
2679 | for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { | 2826 | for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { |
@@ -2687,8 +2834,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
2687 | DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " | 2834 | DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " |
2688 | "not set within the polling period\n", rxr); | 2835 | "not set within the polling period\n", rxr); |
2689 | } | 2836 | } |
2690 | ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], | 2837 | ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], |
2691 | (adapter->rx_ring[rxr].count - 1)); | 2838 | (adapter->rx_ring[rxr]->count - 1)); |
2692 | } | 2839 | } |
2693 | 2840 | ||
2694 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | 2841 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) |
@@ -2702,6 +2849,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2702 | u32 txdctl, rxdctl, mhadd; | 2849 | u32 txdctl, rxdctl, mhadd; |
2703 | u32 dmatxctl; | 2850 | u32 dmatxctl; |
2704 | u32 gpie; | 2851 | u32 gpie; |
2852 | u32 ctrl_ext; | ||
2705 | 2853 | ||
2706 | ixgbe_get_hw_control(adapter); | 2854 | ixgbe_get_hw_control(adapter); |
2707 | 2855 | ||
@@ -2714,6 +2862,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2714 | /* MSI only */ | 2862 | /* MSI only */ |
2715 | gpie = 0; | 2863 | gpie = 0; |
2716 | } | 2864 | } |
2865 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
2866 | gpie &= ~IXGBE_GPIE_VTMODE_MASK; | ||
2867 | gpie |= IXGBE_GPIE_VTMODE_64; | ||
2868 | } | ||
2717 | /* XXX: to interrupt immediately for EICS writes, enable this */ | 2869 | /* XXX: to interrupt immediately for EICS writes, enable this */ |
2718 | /* gpie |= IXGBE_GPIE_EIMEN; */ | 2870 | /* gpie |= IXGBE_GPIE_EIMEN; */ |
2719 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | 2871 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); |
@@ -2770,7 +2922,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2770 | } | 2922 | } |
2771 | 2923 | ||
2772 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2924 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2773 | j = adapter->tx_ring[i].reg_idx; | 2925 | j = adapter->tx_ring[i]->reg_idx; |
2774 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 2926 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
2775 | /* enable WTHRESH=8 descriptors, to encourage burst writeback */ | 2927 | /* enable WTHRESH=8 descriptors, to encourage burst writeback */ |
2776 | txdctl |= (8 << 16); | 2928 | txdctl |= (8 << 16); |
@@ -2784,14 +2936,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2784 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); | 2936 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); |
2785 | } | 2937 | } |
2786 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2938 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2787 | j = adapter->tx_ring[i].reg_idx; | 2939 | j = adapter->tx_ring[i]->reg_idx; |
2788 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 2940 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
2789 | txdctl |= IXGBE_TXDCTL_ENABLE; | 2941 | txdctl |= IXGBE_TXDCTL_ENABLE; |
2790 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | 2942 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); |
2943 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
2944 | int wait_loop = 10; | ||
2945 | /* poll for Tx Enable ready */ | ||
2946 | do { | ||
2947 | msleep(1); | ||
2948 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
2949 | } while (--wait_loop && | ||
2950 | !(txdctl & IXGBE_TXDCTL_ENABLE)); | ||
2951 | if (!wait_loop) | ||
2952 | DPRINTK(DRV, ERR, "Could not enable " | ||
2953 | "Tx Queue %d\n", j); | ||
2954 | } | ||
2791 | } | 2955 | } |
2792 | 2956 | ||
2793 | for (i = 0; i < num_rx_rings; i++) { | 2957 | for (i = 0; i < num_rx_rings; i++) { |
2794 | j = adapter->rx_ring[i].reg_idx; | 2958 | j = adapter->rx_ring[i]->reg_idx; |
2795 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 2959 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
2796 | /* enable PTHRESH=32 descriptors (half the internal cache) | 2960 | /* enable PTHRESH=32 descriptors (half the internal cache) |
2797 | * and HTHRESH=0 descriptors (to minimize latency on fetch), | 2961 | * and HTHRESH=0 descriptors (to minimize latency on fetch), |
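On 82599 a Tx queue must not be used until TXDCTL.ENABLE reads back as set, hence the bounded poll added above (up to ~10 ms in 1 ms steps). The same poll-with-timeout shape recurs in the driver; a generic sketch, not code from this patch:

/* poll `reg` until `bit` reads back set, for up to `loops` ms */
static bool ixgbe_poll_bit(struct ixgbe_hw *hw, u32 reg, u32 bit, int loops)
{
	do {
		if (IXGBE_READ_REG(hw, reg) & bit)
			return true;
		msleep(1);
	} while (--loops);
	return false;
}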
@@ -2865,7 +3029,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2865 | 3029 | ||
2866 | for (i = 0; i < adapter->num_tx_queues; i++) | 3030 | for (i = 0; i < adapter->num_tx_queues; i++) |
2867 | set_bit(__IXGBE_FDIR_INIT_DONE, | 3031 | set_bit(__IXGBE_FDIR_INIT_DONE, |
2868 | &(adapter->tx_ring[i].reinit_state)); | 3032 | &(adapter->tx_ring[i]->reinit_state)); |
2869 | 3033 | ||
2870 | /* enable transmits */ | 3034 | /* enable transmits */ |
2871 | netif_tx_start_all_queues(netdev); | 3035 | netif_tx_start_all_queues(netdev); |
@@ -2875,6 +3039,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2875 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; | 3039 | adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; |
2876 | adapter->link_check_timeout = jiffies; | 3040 | adapter->link_check_timeout = jiffies; |
2877 | mod_timer(&adapter->watchdog_timer, jiffies); | 3041 | mod_timer(&adapter->watchdog_timer, jiffies); |
3042 | |||
3043 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ | ||
3044 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | ||
3045 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | ||
3046 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | ||
3047 | |||
2878 | return 0; | 3048 | return 0; |
2879 | } | 3049 | } |
2880 | 3050 | ||
@@ -2923,7 +3093,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
2923 | } | 3093 | } |
2924 | 3094 | ||
2925 | /* reprogram the RAR[0] in case user changed it. */ | 3095 | /* reprogram the RAR[0] in case user changed it. */ |
2926 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 3096 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
3097 | IXGBE_RAH_AV); | ||
2927 | } | 3098 | } |
2928 | 3099 | ||
2929 | /** | 3100 | /** |
@@ -2955,6 +3126,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
2955 | rx_buffer_info->skb = NULL; | 3126 | rx_buffer_info->skb = NULL; |
2956 | do { | 3127 | do { |
2957 | struct sk_buff *this = skb; | 3128 | struct sk_buff *this = skb; |
3129 | if (IXGBE_RSC_CB(this)->dma) | ||
3130 | pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, | ||
3131 | rx_ring->rx_buf_len, | ||
3132 | PCI_DMA_FROMDEVICE); | ||
2958 | skb = skb->prev; | 3133 | skb = skb->prev; |
2959 | dev_kfree_skb(this); | 3134 | dev_kfree_skb(this); |
2960 | } while (skb); | 3135 | } while (skb); |
@@ -3029,7 +3204,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) | |||
3029 | int i; | 3204 | int i; |
3030 | 3205 | ||
3031 | for (i = 0; i < adapter->num_rx_queues; i++) | 3206 | for (i = 0; i < adapter->num_rx_queues; i++) |
3032 | ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); | 3207 | ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); |
3033 | } | 3208 | } |
3034 | 3209 | ||
3035 | /** | 3210 | /** |
@@ -3041,7 +3216,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) | |||
3041 | int i; | 3216 | int i; |
3042 | 3217 | ||
3043 | for (i = 0; i < adapter->num_tx_queues; i++) | 3218 | for (i = 0; i < adapter->num_tx_queues; i++) |
3044 | ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); | 3219 | ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); |
3045 | } | 3220 | } |
3046 | 3221 | ||
3047 | void ixgbe_down(struct ixgbe_adapter *adapter) | 3222 | void ixgbe_down(struct ixgbe_adapter *adapter) |
@@ -3055,6 +3230,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3055 | /* signal that we are down to the interrupt handler */ | 3230 | /* signal that we are down to the interrupt handler */ |
3056 | set_bit(__IXGBE_DOWN, &adapter->state); | 3231 | set_bit(__IXGBE_DOWN, &adapter->state); |
3057 | 3232 | ||
3233 | /* disable receive for all VFs and wait one second */ | ||
3234 | if (adapter->num_vfs) { | ||
3235 | for (i = 0 ; i < adapter->num_vfs; i++) | ||
3236 | adapter->vfinfo[i].clear_to_send = 0; | ||
3237 | |||
3238 | /* ping all the active vfs to let them know we are going down */ | ||
3239 | ixgbe_ping_all_vfs(adapter); | ||
3240 | /* Disable all VFTE/VFRE TX/RX */ | ||
3241 | ixgbe_disable_tx_rx(adapter); | ||
3242 | } | ||
3243 | |||
3058 | /* disable receives */ | 3244 | /* disable receives */ |
3059 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 3245 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
3060 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | 3246 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
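The SR-IOV teardown above is ordered deliberately; restated:

/* 1. vfinfo[i].clear_to_send = 0  - NAK any further VF mailbox requests
 * 2. ixgbe_ping_all_vfs()         - tell running VF drivers the PF is
 *                                   going down
 * 3. ixgbe_disable_tx_rx()        - clear the VFTE/VFRE pool enables
 * 4. RXCTRL.RXEN cleared          - finally stop the PF's own receives */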
@@ -3081,7 +3267,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3081 | 3267 | ||
3082 | /* disable transmits in the hardware now that interrupts are off */ | 3268 | /* disable transmits in the hardware now that interrupts are off */ |
3083 | for (i = 0; i < adapter->num_tx_queues; i++) { | 3269 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3084 | j = adapter->tx_ring[i].reg_idx; | 3270 | j = adapter->tx_ring[i]->reg_idx; |
3085 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 3271 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); |
3086 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), | 3272 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), |
3087 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); | 3273 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); |
@@ -3094,6 +3280,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3094 | 3280 | ||
3095 | netif_carrier_off(netdev); | 3281 | netif_carrier_off(netdev); |
3096 | 3282 | ||
3283 | /* clear n-tuple filters that are cached */ | ||
3284 | ethtool_ntuple_flush(netdev); | ||
3285 | |||
3097 | if (!pci_channel_offline(adapter->pdev)) | 3286 | if (!pci_channel_offline(adapter->pdev)) |
3098 | ixgbe_reset(adapter); | 3287 | ixgbe_reset(adapter); |
3099 | ixgbe_clean_all_tx_rings(adapter); | 3288 | ixgbe_clean_all_tx_rings(adapter); |
@@ -3121,13 +3310,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
3121 | 3310 | ||
3122 | #ifdef CONFIG_IXGBE_DCA | 3311 | #ifdef CONFIG_IXGBE_DCA |
3123 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | 3312 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { |
3124 | ixgbe_update_tx_dca(adapter, adapter->tx_ring); | 3313 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); |
3125 | ixgbe_update_rx_dca(adapter, adapter->rx_ring); | 3314 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]); |
3126 | } | 3315 | } |
3127 | #endif | 3316 | #endif |
3128 | 3317 | ||
3129 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring); | 3318 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); |
3130 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); | 3319 | ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget); |
3131 | 3320 | ||
3132 | if (!tx_clean_complete) | 3321 | if (!tx_clean_complete) |
3133 | work_done = budget; | 3322 | work_done = budget; |
@@ -3291,6 +3480,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | |||
3291 | } | 3480 | } |
3292 | 3481 | ||
3293 | #endif /* IXGBE_FCOE */ | 3482 | #endif /* IXGBE_FCOE */ |
3483 | /** | ||
3484 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | ||
3485 | * @adapter: board private structure to initialize | ||
3486 | * | ||
3487 | * IOV doesn't actually use anything, so just NAK the | ||
3488 | * request for now and let the other queue routines | ||
3489 | * figure out what to do. | ||
3490 | */ | ||
3491 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | ||
3492 | { | ||
3493 | return false; | ||
3494 | } | ||
3495 | |||
3294 | /* | 3496 | /* |
3295 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | 3497 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent |
3296 | * @adapter: board private structure to initialize | 3498 | * @adapter: board private structure to initialize |
@@ -3304,6 +3506,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | |||
3304 | **/ | 3506 | **/ |
3305 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | 3507 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
3306 | { | 3508 | { |
3509 | /* Start with base case */ | ||
3510 | adapter->num_rx_queues = 1; | ||
3511 | adapter->num_tx_queues = 1; | ||
3512 | adapter->num_rx_pools = adapter->num_rx_queues; | ||
3513 | adapter->num_rx_queues_per_pool = 1; | ||
3514 | |||
3515 | if (ixgbe_set_sriov_queues(adapter)) | ||
3516 | return; | ||
3517 | |||
3307 | #ifdef IXGBE_FCOE | 3518 | #ifdef IXGBE_FCOE |
3308 | if (ixgbe_set_fcoe_queues(adapter)) | 3519 | if (ixgbe_set_fcoe_queues(adapter)) |
3309 | goto done; | 3520 | goto done; |
@@ -3393,9 +3604,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | |||
3393 | 3604 | ||
3394 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 3605 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
3395 | for (i = 0; i < adapter->num_rx_queues; i++) | 3606 | for (i = 0; i < adapter->num_rx_queues; i++) |
3396 | adapter->rx_ring[i].reg_idx = i; | 3607 | adapter->rx_ring[i]->reg_idx = i; |
3397 | for (i = 0; i < adapter->num_tx_queues; i++) | 3608 | for (i = 0; i < adapter->num_tx_queues; i++) |
3398 | adapter->tx_ring[i].reg_idx = i; | 3609 | adapter->tx_ring[i]->reg_idx = i; |
3399 | ret = true; | 3610 | ret = true; |
3400 | } else { | 3611 | } else { |
3401 | ret = false; | 3612 | ret = false; |
@@ -3422,8 +3633,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3422 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 3633 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { |
3423 | /* the number of queues is assumed to be symmetric */ | 3634 | /* the number of queues is assumed to be symmetric */ |
3424 | for (i = 0; i < dcb_i; i++) { | 3635 | for (i = 0; i < dcb_i; i++) { |
3425 | adapter->rx_ring[i].reg_idx = i << 3; | 3636 | adapter->rx_ring[i]->reg_idx = i << 3; |
3426 | adapter->tx_ring[i].reg_idx = i << 2; | 3637 | adapter->tx_ring[i]->reg_idx = i << 2; |
3427 | } | 3638 | } |
3428 | ret = true; | 3639 | ret = true; |
3429 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 3640 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
@@ -3441,18 +3652,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3441 | * Rx TC0-TC7 are offset by 16 queues each | 3652 | * Rx TC0-TC7 are offset by 16 queues each |
3442 | */ | 3653 | */ |
3443 | for (i = 0; i < 3; i++) { | 3654 | for (i = 0; i < 3; i++) { |
3444 | adapter->tx_ring[i].reg_idx = i << 5; | 3655 | adapter->tx_ring[i]->reg_idx = i << 5; |
3445 | adapter->rx_ring[i].reg_idx = i << 4; | 3656 | adapter->rx_ring[i]->reg_idx = i << 4; |
3446 | } | 3657 | } |
3447 | for ( ; i < 5; i++) { | 3658 | for ( ; i < 5; i++) { |
3448 | adapter->tx_ring[i].reg_idx = | 3659 | adapter->tx_ring[i]->reg_idx = |
3449 | ((i + 2) << 4); | 3660 | ((i + 2) << 4); |
3450 | adapter->rx_ring[i].reg_idx = i << 4; | 3661 | adapter->rx_ring[i]->reg_idx = i << 4; |
3451 | } | 3662 | } |
3452 | for ( ; i < dcb_i; i++) { | 3663 | for ( ; i < dcb_i; i++) { |
3453 | adapter->tx_ring[i].reg_idx = | 3664 | adapter->tx_ring[i]->reg_idx = |
3454 | ((i + 8) << 3); | 3665 | ((i + 8) << 3); |
3455 | adapter->rx_ring[i].reg_idx = i << 4; | 3666 | adapter->rx_ring[i]->reg_idx = i << 4; |
3456 | } | 3667 | } |
3457 | 3668 | ||
3458 | ret = true; | 3669 | ret = true; |
@@ -3465,12 +3676,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3465 | * | 3676 | * |
3466 | * Rx TC0-TC3 are offset by 32 queues each | 3677 | * Rx TC0-TC3 are offset by 32 queues each |
3467 | */ | 3678 | */ |
3468 | adapter->tx_ring[0].reg_idx = 0; | 3679 | adapter->tx_ring[0]->reg_idx = 0; |
3469 | adapter->tx_ring[1].reg_idx = 64; | 3680 | adapter->tx_ring[1]->reg_idx = 64; |
3470 | adapter->tx_ring[2].reg_idx = 96; | 3681 | adapter->tx_ring[2]->reg_idx = 96; |
3471 | adapter->tx_ring[3].reg_idx = 112; | 3682 | adapter->tx_ring[3]->reg_idx = 112; |
3472 | for (i = 0 ; i < dcb_i; i++) | 3683 | for (i = 0 ; i < dcb_i; i++) |
3473 | adapter->rx_ring[i].reg_idx = i << 5; | 3684 | adapter->rx_ring[i]->reg_idx = i << 5; |
3474 | 3685 | ||
3475 | ret = true; | 3686 | ret = true; |
3476 | } else { | 3687 | } else { |
@@ -3503,9 +3714,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | |||
3503 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 3714 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
3504 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | 3715 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { |
3505 | for (i = 0; i < adapter->num_rx_queues; i++) | 3716 | for (i = 0; i < adapter->num_rx_queues; i++) |
3506 | adapter->rx_ring[i].reg_idx = i; | 3717 | adapter->rx_ring[i]->reg_idx = i; |
3507 | for (i = 0; i < adapter->num_tx_queues; i++) | 3718 | for (i = 0; i < adapter->num_tx_queues; i++) |
3508 | adapter->tx_ring[i].reg_idx = i; | 3719 | adapter->tx_ring[i]->reg_idx = i; |
3509 | ret = true; | 3720 | ret = true; |
3510 | } | 3721 | } |
3511 | 3722 | ||
@@ -3533,8 +3744,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3533 | 3744 | ||
3534 | ixgbe_cache_ring_dcb(adapter); | 3745 | ixgbe_cache_ring_dcb(adapter); |
3535 | /* find out queues in TC for FCoE */ | 3746 | /* find out queues in TC for FCoE */ |
3536 | fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1; | 3747 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; |
3537 | fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1; | 3748 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; |
3538 | /* | 3749 | /* |
3539 | * In 82599, the number of Tx queues for each traffic | 3750 | * In 82599, the number of Tx queues for each traffic |
3540 | * class for both 8-TC and 4-TC modes are: | 3751 | * class for both 8-TC and 4-TC modes are: |
@@ -3565,8 +3776,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3565 | fcoe_tx_i = f->mask; | 3776 | fcoe_tx_i = f->mask; |
3566 | } | 3777 | } |
3567 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | 3778 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { |
3568 | adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i; | 3779 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; |
3569 | adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i; | 3780 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; |
3570 | } | 3781 | } |
3571 | ret = true; | 3782 | ret = true; |
3572 | } | 3783 | } |
@@ -3575,6 +3786,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3575 | 3786 | ||
3576 | #endif /* IXGBE_FCOE */ | 3787 | #endif /* IXGBE_FCOE */ |
3577 | /** | 3788 | /** |
3789 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | ||
3790 | * @adapter: board private structure to initialize | ||
3791 | * | ||
3792 | * SR-IOV doesn't use any descriptor rings but changes the default if | ||
3793 | * no other mapping is used. | ||
3794 | * | ||
3795 | */ | ||
3796 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
3797 | { | ||
3798 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
3799 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
3800 | if (adapter->num_vfs) | ||
3801 | return true; | ||
3802 | else | ||
3803 | return false; | ||
3804 | } | ||
3805 | |||
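The factor of two above encodes the 82599's IOV queue layout. A minimal standalone sketch of the same arithmetic, assuming (as the reg_idx math implies, though the patch never states it outright) that each VF owns a pool of two hardware queues and the PF's rings start immediately after the last VF pool:

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_vfs = 8;              /* hypothetical VF count */
        unsigned int pf_reg_idx = num_vfs * 2; /* same math as ixgbe_cache_ring_sriov() */

        /* VF n owns absolute queues 2n and 2n+1; the PF lands after them */
        printf("VF %u uses queues %u-%u; PF ring 0 maps to queue %u\n",
               num_vfs - 1, 2 * (num_vfs - 1), 2 * (num_vfs - 1) + 1,
               pf_reg_idx);
        return 0;
    }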
3806 | /** | ||
3578 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | 3807 | * ixgbe_cache_ring_register - Descriptor ring to register mapping |
3579 | * @adapter: board private structure to initialize | 3808 | * @adapter: board private structure to initialize |
3580 | * | 3809 | * |
@@ -3588,8 +3817,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | |||
3588 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | 3817 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) |
3589 | { | 3818 | { |
3590 | /* start with default case */ | 3819 | /* start with default case */ |
3591 | adapter->rx_ring[0].reg_idx = 0; | 3820 | adapter->rx_ring[0]->reg_idx = 0; |
3592 | adapter->tx_ring[0].reg_idx = 0; | 3821 | adapter->tx_ring[0]->reg_idx = 0; |
3822 | |||
3823 | if (ixgbe_cache_ring_sriov(adapter)) | ||
3824 | return; | ||
3593 | 3825 | ||
3594 | #ifdef IXGBE_FCOE | 3826 | #ifdef IXGBE_FCOE |
3595 | if (ixgbe_cache_ring_fcoe(adapter)) | 3827 | if (ixgbe_cache_ring_fcoe(adapter)) |
@@ -3619,33 +3851,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
3619 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | 3851 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
3620 | { | 3852 | { |
3621 | int i; | 3853 | int i; |
3622 | 3854 | int orig_node = adapter->node; | |
3623 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | ||
3624 | sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3625 | if (!adapter->tx_ring) | ||
3626 | goto err_tx_ring_allocation; | ||
3627 | |||
3628 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, | ||
3629 | sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3630 | if (!adapter->rx_ring) | ||
3631 | goto err_rx_ring_allocation; | ||
3632 | 3855 | ||
3633 | for (i = 0; i < adapter->num_tx_queues; i++) { | 3856 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3634 | adapter->tx_ring[i].count = adapter->tx_ring_count; | 3857 | struct ixgbe_ring *ring = adapter->tx_ring[i]; |
3635 | adapter->tx_ring[i].queue_index = i; | 3858 | if (orig_node == -1) { |
3859 | int cur_node = next_online_node(adapter->node); | ||
3860 | if (cur_node == MAX_NUMNODES) | ||
3861 | cur_node = first_online_node; | ||
3862 | adapter->node = cur_node; | ||
3863 | } | ||
3864 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
3865 | adapter->node); | ||
3866 | if (!ring) | ||
3867 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3868 | if (!ring) | ||
3869 | goto err_tx_ring_allocation; | ||
3870 | ring->count = adapter->tx_ring_count; | ||
3871 | ring->queue_index = i; | ||
3872 | ring->numa_node = adapter->node; | ||
3873 | |||
3874 | adapter->tx_ring[i] = ring; | ||
3636 | } | 3875 | } |
3637 | 3876 | ||
3877 | /* Restore the adapter's original node */ | ||
3878 | adapter->node = orig_node; | ||
3879 | |||
3638 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3880 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3639 | adapter->rx_ring[i].count = adapter->rx_ring_count; | 3881 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
3640 | adapter->rx_ring[i].queue_index = i; | 3882 | if (orig_node == -1) { |
3883 | int cur_node = next_online_node(adapter->node); | ||
3884 | if (cur_node == MAX_NUMNODES) | ||
3885 | cur_node = first_online_node; | ||
3886 | adapter->node = cur_node; | ||
3887 | } | ||
3888 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
3889 | adapter->node); | ||
3890 | if (!ring) | ||
3891 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | ||
3892 | if (!ring) | ||
3893 | goto err_rx_ring_allocation; | ||
3894 | ring->count = adapter->rx_ring_count; | ||
3895 | ring->queue_index = i; | ||
3896 | ring->numa_node = adapter->node; | ||
3897 | |||
3898 | adapter->rx_ring[i] = ring; | ||
3641 | } | 3899 | } |
3642 | 3900 | ||
3901 | /* Restore the adapter's original node */ | ||
3902 | adapter->node = orig_node; | ||
3903 | |||
3643 | ixgbe_cache_ring_register(adapter); | 3904 | ixgbe_cache_ring_register(adapter); |
3644 | 3905 | ||
3645 | return 0; | 3906 | return 0; |
3646 | 3907 | ||
3647 | err_rx_ring_allocation: | 3908 | err_rx_ring_allocation: |
3648 | kfree(adapter->tx_ring); | 3909 | for (i = 0; i < adapter->num_tx_queues; i++) |
3910 | kfree(adapter->tx_ring[i]); | ||
3649 | err_tx_ring_allocation: | 3911 | err_tx_ring_allocation: |
3650 | return -ENOMEM; | 3912 | return -ENOMEM; |
3651 | } | 3913 | } |
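When the adapter has no fixed NUMA node (orig_node == -1), the loops above walk the online nodes round-robin so rings spread across memory controllers, falling back to a plain kzalloc() when the node-local allocation fails. A user-space model of just the wraparound walk, with a made-up node count standing in for the kernel's node iterators:

    #include <stdio.h>

    #define NUM_NODES 4 /* stand-in for the set of online NUMA nodes */

    /* Mirrors the next_online_node()/first_online_node idiom: when the
     * iterator runs off the end (MAX_NUMNODES in the kernel), restart
     * from the first node. */
    static int next_node(int node)
    {
        int next = node + 1;
        return (next >= NUM_NODES) ? 0 : next;
    }

    int main(void)
    {
        int node = -1, i;

        for (i = 0; i < 6; i++) { /* six rings across four nodes */
            node = next_node(node);
            printf("ring %d allocated on node %d\n", i, node);
        }
        return 0;
    }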
@@ -3700,6 +3962,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
3700 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 3962 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
3701 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 3963 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
3702 | adapter->atr_sample_rate = 0; | 3964 | adapter->atr_sample_rate = 0; |
3965 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
3966 | ixgbe_disable_sriov(adapter); | ||
3967 | |||
3703 | ixgbe_set_num_queues(adapter); | 3968 | ixgbe_set_num_queues(adapter); |
3704 | 3969 | ||
3705 | err = pci_enable_msi(adapter->pdev); | 3970 | err = pci_enable_msi(adapter->pdev); |
@@ -3741,7 +4006,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
3741 | } | 4006 | } |
3742 | 4007 | ||
3743 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | 4008 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { |
3744 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); | 4009 | q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), |
4010 | GFP_KERNEL, adapter->node); | ||
4011 | if (!q_vector) | ||
4012 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), | ||
4013 | GFP_KERNEL); | ||
3745 | if (!q_vector) | 4014 | if (!q_vector) |
3746 | goto err_out; | 4015 | goto err_out; |
3747 | q_vector->adapter = adapter; | 4016 | q_vector->adapter = adapter; |
@@ -3868,10 +4137,16 @@ err_set_interrupt: | |||
3868 | **/ | 4137 | **/ |
3869 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | 4138 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) |
3870 | { | 4139 | { |
3871 | kfree(adapter->tx_ring); | 4140 | int i; |
3872 | kfree(adapter->rx_ring); | 4141 | |
3873 | adapter->tx_ring = NULL; | 4142 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3874 | adapter->rx_ring = NULL; | 4143 | kfree(adapter->tx_ring[i]); |
4144 | adapter->tx_ring[i] = NULL; | ||
4145 | } | ||
4146 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
4147 | kfree(adapter->rx_ring[i]); | ||
4148 | adapter->rx_ring[i] = NULL; | ||
4149 | } | ||
3875 | 4150 | ||
3876 | ixgbe_free_q_vectors(adapter); | 4151 | ixgbe_free_q_vectors(adapter); |
3877 | ixgbe_reset_interrupt_capability(adapter); | 4152 | ixgbe_reset_interrupt_capability(adapter); |
@@ -3942,6 +4217,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3942 | { | 4217 | { |
3943 | struct ixgbe_hw *hw = &adapter->hw; | 4218 | struct ixgbe_hw *hw = &adapter->hw; |
3944 | struct pci_dev *pdev = adapter->pdev; | 4219 | struct pci_dev *pdev = adapter->pdev; |
4220 | struct net_device *dev = adapter->netdev; | ||
3945 | unsigned int rss; | 4221 | unsigned int rss; |
3946 | #ifdef CONFIG_IXGBE_DCB | 4222 | #ifdef CONFIG_IXGBE_DCB |
3947 | int j; | 4223 | int j; |
@@ -3969,10 +4245,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3969 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 4245 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3970 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; | 4246 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
3971 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 4247 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
3972 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 4248 | if (dev->features & NETIF_F_NTUPLE) { |
4249 | /* Flow Director perfect filter enabled */ | ||
4250 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
4251 | adapter->atr_sample_rate = 0; | ||
4252 | spin_lock_init(&adapter->fdir_perfect_lock); | ||
4253 | } else { | ||
4254 | /* Flow Director hash filters enabled */ | ||
4255 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4256 | adapter->atr_sample_rate = 20; | ||
4257 | } | ||
3973 | adapter->ring_feature[RING_F_FDIR].indices = | 4258 | adapter->ring_feature[RING_F_FDIR].indices = |
3974 | IXGBE_MAX_FDIR_INDICES; | 4259 | IXGBE_MAX_FDIR_INDICES; |
3975 | adapter->atr_sample_rate = 20; | ||
3976 | adapter->fdir_pballoc = 0; | 4260 | adapter->fdir_pballoc = 0; |
3977 | #ifdef IXGBE_FCOE | 4261 | #ifdef IXGBE_FCOE |
3978 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; | 4262 | adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; |
@@ -4041,6 +4325,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4041 | /* enable rx csum by default */ | 4325 | /* enable rx csum by default */ |
4042 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; | 4326 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; |
4043 | 4327 | ||
4328 | /* get assigned NUMA node */ | ||
4329 | adapter->node = dev_to_node(&pdev->dev); | ||
4330 | |||
4044 | set_bit(__IXGBE_DOWN, &adapter->state); | 4331 | set_bit(__IXGBE_DOWN, &adapter->state); |
4045 | 4332 | ||
4046 | return 0; | 4333 | return 0; |
@@ -4060,7 +4347,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4060 | int size; | 4347 | int size; |
4061 | 4348 | ||
4062 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 4349 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
4063 | tx_ring->tx_buffer_info = vmalloc(size); | 4350 | tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); |
4351 | if (!tx_ring->tx_buffer_info) | ||
4352 | tx_ring->tx_buffer_info = vmalloc(size); | ||
4064 | if (!tx_ring->tx_buffer_info) | 4353 | if (!tx_ring->tx_buffer_info) |
4065 | goto err; | 4354 | goto err; |
4066 | memset(tx_ring->tx_buffer_info, 0, size); | 4355 | memset(tx_ring->tx_buffer_info, 0, size); |
@@ -4102,7 +4391,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4102 | int i, err = 0; | 4391 | int i, err = 0; |
4103 | 4392 | ||
4104 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4393 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4105 | err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); | 4394 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); |
4106 | if (!err) | 4395 | if (!err) |
4107 | continue; | 4396 | continue; |
4108 | DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); | 4397 | DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); |
@@ -4126,7 +4415,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
4126 | int size; | 4415 | int size; |
4127 | 4416 | ||
4128 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 4417 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
4129 | rx_ring->rx_buffer_info = vmalloc(size); | 4418 | rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); |
4419 | if (!rx_ring->rx_buffer_info) | ||
4420 | rx_ring->rx_buffer_info = vmalloc(size); | ||
4130 | if (!rx_ring->rx_buffer_info) { | 4421 | if (!rx_ring->rx_buffer_info) { |
4131 | DPRINTK(PROBE, ERR, | 4422 | DPRINTK(PROBE, ERR, |
4132 | "vmalloc allocation failed for the rx desc ring\n"); | 4423 | "vmalloc allocation failed for the rx desc ring\n"); |
@@ -4172,7 +4463,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4172 | int i, err = 0; | 4463 | int i, err = 0; |
4173 | 4464 | ||
4174 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4465 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4175 | err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); | 4466 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); |
4176 | if (!err) | 4467 | if (!err) |
4177 | continue; | 4468 | continue; |
4178 | DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); | 4469 | DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); |
@@ -4215,8 +4506,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) | |||
4215 | int i; | 4506 | int i; |
4216 | 4507 | ||
4217 | for (i = 0; i < adapter->num_tx_queues; i++) | 4508 | for (i = 0; i < adapter->num_tx_queues; i++) |
4218 | if (adapter->tx_ring[i].desc) | 4509 | if (adapter->tx_ring[i]->desc) |
4219 | ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); | 4510 | ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); |
4220 | } | 4511 | } |
4221 | 4512 | ||
4222 | /** | 4513 | /** |
@@ -4252,8 +4543,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
4252 | int i; | 4543 | int i; |
4253 | 4544 | ||
4254 | for (i = 0; i < adapter->num_rx_queues; i++) | 4545 | for (i = 0; i < adapter->num_rx_queues; i++) |
4255 | if (adapter->rx_ring[i].desc) | 4546 | if (adapter->rx_ring[i]->desc) |
4256 | ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); | 4547 | ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); |
4257 | } | 4548 | } |
4258 | 4549 | ||
4259 | /** | 4550 | /** |
@@ -4530,8 +4821,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
4530 | adapter->hw_rx_no_dma_resources += | 4821 | adapter->hw_rx_no_dma_resources += |
4531 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 4822 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
4532 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4823 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4533 | rsc_count += adapter->rx_ring[i].rsc_count; | 4824 | rsc_count += adapter->rx_ring[i]->rsc_count; |
4534 | rsc_flush += adapter->rx_ring[i].rsc_flush; | 4825 | rsc_flush += adapter->rx_ring[i]->rsc_flush; |
4535 | } | 4826 | } |
4536 | adapter->rsc_total_count = rsc_count; | 4827 | adapter->rsc_total_count = rsc_count; |
4537 | adapter->rsc_total_flush = rsc_flush; | 4828 | adapter->rsc_total_flush = rsc_flush; |
@@ -4539,11 +4830,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
4539 | 4830 | ||
4540 | /* gather some stats to the adapter struct that are per queue */ | 4831 | /* gather some stats to the adapter struct that are per queue */ |
4541 | for (i = 0; i < adapter->num_tx_queues; i++) | 4832 | for (i = 0; i < adapter->num_tx_queues; i++) |
4542 | restart_queue += adapter->tx_ring[i].restart_queue; | 4833 | restart_queue += adapter->tx_ring[i]->restart_queue; |
4543 | adapter->restart_queue = restart_queue; | 4834 | adapter->restart_queue = restart_queue; |
4544 | 4835 | ||
4545 | for (i = 0; i < adapter->num_rx_queues; i++) | 4836 | for (i = 0; i < adapter->num_rx_queues; i++) |
4546 | non_eop_descs += adapter->rx_ring[i].non_eop_descs; | 4837 | non_eop_descs += adapter->rx_ring[i]->non_eop_descs; |
4547 | adapter->non_eop_descs = non_eop_descs; | 4838 | adapter->non_eop_descs = non_eop_descs; |
4548 | 4839 | ||
4549 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | 4840 | adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
@@ -4782,7 +5073,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
4782 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | 5073 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { |
4783 | for (i = 0; i < adapter->num_tx_queues; i++) | 5074 | for (i = 0; i < adapter->num_tx_queues; i++) |
4784 | set_bit(__IXGBE_FDIR_INIT_DONE, | 5075 | set_bit(__IXGBE_FDIR_INIT_DONE, |
4785 | &(adapter->tx_ring[i].reinit_state)); | 5076 | &(adapter->tx_ring[i]->reinit_state)); |
4786 | } else { | 5077 | } else { |
4787 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " | 5078 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " |
4788 | "ignored adding FDIR ATR filters \n"); | 5079 | "ignored adding FDIR ATR filters \n"); |
@@ -4791,6 +5082,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
4791 | netif_tx_start_all_queues(adapter->netdev); | 5082 | netif_tx_start_all_queues(adapter->netdev); |
4792 | } | 5083 | } |
4793 | 5084 | ||
5085 | static DEFINE_MUTEX(ixgbe_watchdog_lock); | ||
5086 | |||
4794 | /** | 5087 | /** |
4795 | * ixgbe_watchdog_task - worker thread to bring link up | 5088 | * ixgbe_watchdog_task - worker thread to bring link up |
4796 | * @work: pointer to work_struct containing our data | 5089 | * @work: pointer to work_struct containing our data |
@@ -4802,13 +5095,16 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4802 | watchdog_task); | 5095 | watchdog_task); |
4803 | struct net_device *netdev = adapter->netdev; | 5096 | struct net_device *netdev = adapter->netdev; |
4804 | struct ixgbe_hw *hw = &adapter->hw; | 5097 | struct ixgbe_hw *hw = &adapter->hw; |
4805 | u32 link_speed = adapter->link_speed; | 5098 | u32 link_speed; |
4806 | bool link_up = adapter->link_up; | 5099 | bool link_up; |
4807 | int i; | 5100 | int i; |
4808 | struct ixgbe_ring *tx_ring; | 5101 | struct ixgbe_ring *tx_ring; |
4809 | int some_tx_pending = 0; | 5102 | int some_tx_pending = 0; |
4810 | 5103 | ||
4811 | adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; | 5104 | mutex_lock(&ixgbe_watchdog_lock); |
5105 | |||
5106 | link_up = adapter->link_up; | ||
5107 | link_speed = adapter->link_speed; | ||
4812 | 5108 | ||
4813 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { | 5109 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { |
4814 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | 5110 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); |
@@ -4879,7 +5175,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4879 | 5175 | ||
4880 | if (!netif_carrier_ok(netdev)) { | 5176 | if (!netif_carrier_ok(netdev)) { |
4881 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5177 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4882 | tx_ring = &adapter->tx_ring[i]; | 5178 | tx_ring = adapter->tx_ring[i]; |
4883 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { | 5179 | if (tx_ring->next_to_use != tx_ring->next_to_clean) { |
4884 | some_tx_pending = 1; | 5180 | some_tx_pending = 1; |
4885 | break; | 5181 | break; |
@@ -4897,7 +5193,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
4897 | } | 5193 | } |
4898 | 5194 | ||
4899 | ixgbe_update_stats(adapter); | 5195 | ixgbe_update_stats(adapter); |
4900 | adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; | 5196 | mutex_unlock(&ixgbe_watchdog_lock); |
4901 | } | 5197 | } |
4902 | 5198 | ||
4903 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 5199 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
@@ -5343,8 +5639,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
5343 | return txq; | 5639 | return txq; |
5344 | } | 5640 | } |
5345 | #endif | 5641 | #endif |
5346 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 5642 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5347 | return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; | 5643 | if (skb->priority == TC_PRIO_CONTROL) |
5644 | txq = adapter->ring_feature[RING_F_DCB].indices-1; | ||
5645 | else | ||
5646 | txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) | ||
5647 | >> 13; | ||
5648 | return txq; | ||
5649 | } | ||
5348 | 5650 | ||
5349 | return skb_tx_hash(dev, skb); | 5651 | return skb_tx_hash(dev, skb); |
5350 | } | 5652 | } |
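The DCB branch pulls the 3-bit 802.1p priority out of the VLAN tag to pick the Tx queue, steering control-priority frames to the last DCB queue instead. The shift-and-mask is easy to check standalone; the mask value mirrors the driver's IXGBE_TX_FLAGS_VLAN_PRIO_MASK but is restated here as an assumption:

    #include <stdio.h>

    #define VLAN_PRIO_MASK 0xe000 /* assumed: PCP bits 15:13 of vlan_tci */

    int main(void)
    {
        unsigned short vlan_tci = 0xa064; /* sample tag, PCP = 5 */
        unsigned int txq = (vlan_tci & VLAN_PRIO_MASK) >> 13;

        printf("vlan_tci 0x%04x -> tx queue %u\n", vlan_tci, txq);
        return 0;
    }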
@@ -5371,17 +5673,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, | |||
5371 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5673 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5372 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | 5674 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5373 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 5675 | } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
5374 | if (skb->priority != TC_PRIO_CONTROL) { | 5676 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); |
5375 | tx_flags |= ((skb->queue_mapping & 0x7) << 13); | 5677 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; |
5376 | tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; | 5678 | tx_flags |= IXGBE_TX_FLAGS_VLAN; |
5377 | tx_flags |= IXGBE_TX_FLAGS_VLAN; | ||
5378 | } else { | ||
5379 | skb->queue_mapping = | ||
5380 | adapter->ring_feature[RING_F_DCB].indices-1; | ||
5381 | } | ||
5382 | } | 5679 | } |
5383 | 5680 | ||
5384 | tx_ring = &adapter->tx_ring[skb->queue_mapping]; | 5681 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
5385 | 5682 | ||
5386 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 5683 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
5387 | (skb->protocol == htons(ETH_P_FCOE))) { | 5684 | (skb->protocol == htons(ETH_P_FCOE))) { |
@@ -5487,7 +5784,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) | |||
5487 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 5784 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
5488 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); | 5785 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
5489 | 5786 | ||
5490 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 5787 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, |
5788 | IXGBE_RAH_AV); | ||
5491 | 5789 | ||
5492 | return 0; | 5790 | return 0; |
5493 | } | 5791 | } |
@@ -5624,6 +5922,61 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
5624 | #endif /* IXGBE_FCOE */ | 5922 | #endif /* IXGBE_FCOE */ |
5625 | }; | 5923 | }; |
5626 | 5924 | ||
5925 | static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, | ||
5926 | const struct ixgbe_info *ii) | ||
5927 | { | ||
5928 | #ifdef CONFIG_PCI_IOV | ||
5929 | struct ixgbe_hw *hw = &adapter->hw; | ||
5930 | int err; | ||
5931 | |||
5932 | if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) | ||
5933 | return; | ||
5934 | |||
5935 | /* The 82599 supports up to 64 VFs per physical function | ||
5936 | * but this implementation limits allocation to 63 so that | ||
5937 | * basic networking resources are still available to the | ||
5938 | * physical function | ||
5939 | */ | ||
5940 | adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; | ||
5941 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; | ||
5942 | err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); | ||
5943 | if (err) { | ||
5944 | DPRINTK(PROBE, ERR, | ||
5945 | "Failed to enable PCI sriov: %d\n", err); | ||
5946 | goto err_novfs; | ||
5947 | } | ||
5948 | /* If call to enable VFs succeeded then allocate memory | ||
5949 | * for per VF control structures. | ||
5950 | */ | ||
5951 | adapter->vfinfo = | ||
5952 | kcalloc(adapter->num_vfs, | ||
5953 | sizeof(struct vf_data_storage), GFP_KERNEL); | ||
5954 | if (adapter->vfinfo) { | ||
5955 | /* Now that we're sure SR-IOV is enabled | ||
5956 | * and memory allocated set up the mailbox parameters | ||
5957 | */ | ||
5958 | ixgbe_init_mbx_params_pf(hw); | ||
5959 | memcpy(&hw->mbx.ops, ii->mbx_ops, | ||
5960 | sizeof(hw->mbx.ops)); | ||
5961 | |||
5962 | /* Disable RSC when in SR-IOV mode */ | ||
5963 | adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | | ||
5964 | IXGBE_FLAG2_RSC_ENABLED); | ||
5965 | return; | ||
5966 | } | ||
5967 | |||
5968 | /* Oh oh */ | ||
5969 | DPRINTK(PROBE, ERR, | ||
5970 | "Unable to allocate memory for VF " | ||
5971 | "Data Storage - SRIOV disabled\n"); | ||
5972 | pci_disable_sriov(adapter->pdev); | ||
5973 | |||
5974 | err_novfs: | ||
5975 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; | ||
5976 | adapter->num_vfs = 0; | ||
5977 | #endif /* CONFIG_PCI_IOV */ | ||
5978 | } | ||
5979 | |||
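The clamp to 63 keeps one of the 64 hardware functions for the PF itself. A trivial sketch of how the max_vfs module parameter maps to the VF count actually enabled (the requested values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int requested[] = { 0, 7, 63, 64, 200 };
        unsigned int i;

        for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
            unsigned int num_vfs = (requested[i] > 63) ? 63 : requested[i];
            printf("max_vfs=%u -> num_vfs=%u\n", requested[i], num_vfs);
        }
        return 0;
    }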
5627 | /** | 5980 | /** |
5628 | * ixgbe_probe - Device Initialization Routine | 5981 | * ixgbe_probe - Device Initialization Routine |
5629 | * @pdev: PCI device information struct | 5982 | * @pdev: PCI device information struct |
@@ -5644,6 +5997,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5644 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; | 5997 | const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; |
5645 | static int cards_found; | 5998 | static int cards_found; |
5646 | int i, err, pci_using_dac; | 5999 | int i, err, pci_using_dac; |
6000 | unsigned int indices = num_possible_cpus(); | ||
5647 | #ifdef IXGBE_FCOE | 6001 | #ifdef IXGBE_FCOE |
5648 | u16 device_caps; | 6002 | u16 device_caps; |
5649 | #endif | 6003 | #endif |
@@ -5682,7 +6036,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5682 | pci_set_master(pdev); | 6036 | pci_set_master(pdev); |
5683 | pci_save_state(pdev); | 6037 | pci_save_state(pdev); |
5684 | 6038 | ||
5685 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); | 6039 | if (ii->mac == ixgbe_mac_82598EB) |
6040 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); | ||
6041 | else | ||
6042 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | ||
6043 | |||
6044 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); | ||
6045 | #ifdef IXGBE_FCOE | ||
6046 | indices += min_t(unsigned int, num_possible_cpus(), | ||
6047 | IXGBE_MAX_FCOE_INDICES); | ||
6048 | #endif | ||
6049 | indices = min_t(unsigned int, indices, MAX_TX_QUEUES); | ||
6050 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); | ||
5686 | if (!netdev) { | 6051 | if (!netdev) { |
5687 | err = -ENOMEM; | 6052 | err = -ENOMEM; |
5688 | goto err_alloc_etherdev; | 6053 | goto err_alloc_etherdev; |
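A worked example of the queue sizing above for a hypothetical 16-CPU box driving an 82599 with FCoE compiled in; the constants mirror the ixgbe.h values of the era and should be treated as illustrative:

    #include <stdio.h>

    #define IXGBE_MAX_RSS_INDICES  16
    #define IXGBE_MAX_FDIR_INDICES 64
    #define IXGBE_MAX_DCB_INDICES  8
    #define IXGBE_MAX_FCOE_INDICES 8
    #define MAX_TX_QUEUES          128

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

    int main(void)
    {
        unsigned int cpus = 16;    /* hypothetical machine */
        unsigned int indices = cpus;

        indices = min_u(indices, IXGBE_MAX_FDIR_INDICES); /* 82599 path */
        indices = max_u(indices, IXGBE_MAX_DCB_INDICES);
        indices += min_u(cpus, IXGBE_MAX_FCOE_INDICES);   /* IXGBE_FCOE */
        indices = min_u(indices, MAX_TX_QUEUES);

        printf("netdev allocated with %u tx queues\n", indices); /* 24 */
        return 0;
    }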
@@ -5802,6 +6167,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5802 | goto err_sw_init; | 6167 | goto err_sw_init; |
5803 | } | 6168 | } |
5804 | 6169 | ||
6170 | ixgbe_probe_vf(adapter, ii); | ||
6171 | |||
5805 | netdev->features = NETIF_F_SG | | 6172 | netdev->features = NETIF_F_SG | |
5806 | NETIF_F_IP_CSUM | | 6173 | NETIF_F_IP_CSUM | |
5807 | NETIF_F_HW_VLAN_TX | | 6174 | NETIF_F_HW_VLAN_TX | |
@@ -5822,6 +6189,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5822 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; | 6189 | netdev->vlan_features |= NETIF_F_IPV6_CSUM; |
5823 | netdev->vlan_features |= NETIF_F_SG; | 6190 | netdev->vlan_features |= NETIF_F_SG; |
5824 | 6191 | ||
6192 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6193 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | | ||
6194 | IXGBE_FLAG_DCB_ENABLED); | ||
5825 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 6195 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
5826 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 6196 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
5827 | 6197 | ||
@@ -5948,6 +6318,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5948 | ixgbe_setup_dca(adapter); | 6318 | ixgbe_setup_dca(adapter); |
5949 | } | 6319 | } |
5950 | #endif | 6320 | #endif |
6321 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { | ||
6322 | DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", | ||
6323 | adapter->num_vfs); | ||
6324 | for (i = 0; i < adapter->num_vfs; i++) | ||
6325 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); | ||
6326 | } | ||
6327 | |||
5951 | /* add san mac addr to netdev */ | 6328 | /* add san mac addr to netdev */ |
5952 | ixgbe_add_sanmac_netdev(netdev); | 6329 | ixgbe_add_sanmac_netdev(netdev); |
5953 | 6330 | ||
@@ -5960,6 +6337,8 @@ err_register: | |||
5960 | ixgbe_clear_interrupt_scheme(adapter); | 6337 | ixgbe_clear_interrupt_scheme(adapter); |
5961 | err_sw_init: | 6338 | err_sw_init: |
5962 | err_eeprom: | 6339 | err_eeprom: |
6340 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6341 | ixgbe_disable_sriov(adapter); | ||
5963 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 6342 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
5964 | del_timer_sync(&adapter->sfp_timer); | 6343 | del_timer_sync(&adapter->sfp_timer); |
5965 | cancel_work_sync(&adapter->sfp_task); | 6344 | cancel_work_sync(&adapter->sfp_task); |
@@ -6028,6 +6407,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
6028 | if (netdev->reg_state == NETREG_REGISTERED) | 6407 | if (netdev->reg_state == NETREG_REGISTERED) |
6029 | unregister_netdev(netdev); | 6408 | unregister_netdev(netdev); |
6030 | 6409 | ||
6410 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
6411 | ixgbe_disable_sriov(adapter); | ||
6412 | |||
6031 | ixgbe_clear_interrupt_scheme(adapter); | 6413 | ixgbe_clear_interrupt_scheme(adapter); |
6032 | 6414 | ||
6033 | ixgbe_release_hw_control(adapter); | 6415 | ixgbe_release_hw_control(adapter); |
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c new file mode 100644 index 00000000000..d75f9148eb1 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_mbx.c | |||
@@ -0,0 +1,479 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include <linux/pci.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include "ixgbe_type.h" | ||
31 | #include "ixgbe_common.h" | ||
32 | #include "ixgbe_mbx.h" | ||
33 | |||
34 | /** | ||
35 | * ixgbe_read_mbx - Reads a message from the mailbox | ||
36 | * @hw: pointer to the HW structure | ||
37 | * @msg: The message buffer | ||
38 | * @size: Length of buffer | ||
39 | * @mbx_id: id of mailbox to read | ||
40 | * | ||
41 | * returns SUCCESS if it successfully read message from buffer | ||
42 | **/ | ||
43 | s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) | ||
44 | { | ||
45 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
46 | s32 ret_val = IXGBE_ERR_MBX; | ||
47 | |||
48 | /* limit read to size of mailbox */ | ||
49 | if (size > mbx->size) | ||
50 | size = mbx->size; | ||
51 | |||
52 | if (mbx->ops.read) | ||
53 | ret_val = mbx->ops.read(hw, msg, size, mbx_id); | ||
54 | |||
55 | return ret_val; | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * ixgbe_write_mbx - Write a message to the mailbox | ||
60 | * @hw: pointer to the HW structure | ||
61 | * @msg: The message buffer | ||
62 | * @size: Length of buffer | ||
63 | * @mbx_id: id of mailbox to write | ||
64 | * | ||
65 | * returns SUCCESS if it successfully copied message into the buffer | ||
66 | **/ | ||
67 | s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) | ||
68 | { | ||
69 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
70 | s32 ret_val = 0; | ||
71 | |||
72 | if (size > mbx->size) | ||
73 | ret_val = IXGBE_ERR_MBX; | ||
74 | |||
75 | else if (mbx->ops.write) | ||
76 | ret_val = mbx->ops.write(hw, msg, size, mbx_id); | ||
77 | |||
78 | return ret_val; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * ixgbe_check_for_msg - checks to see if someone sent us mail | ||
83 | * @hw: pointer to the HW structure | ||
84 | * @mbx_id: id of mailbox to check | ||
85 | * | ||
86 | * returns SUCCESS if the Status bit was found or else ERR_MBX | ||
87 | **/ | ||
88 | s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) | ||
89 | { | ||
90 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
91 | s32 ret_val = IXGBE_ERR_MBX; | ||
92 | |||
93 | if (mbx->ops.check_for_msg) | ||
94 | ret_val = mbx->ops.check_for_msg(hw, mbx_id); | ||
95 | |||
96 | return ret_val; | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * ixgbe_check_for_ack - checks to see if someone sent us ACK | ||
101 | * @hw: pointer to the HW structure | ||
102 | * @mbx_id: id of mailbox to check | ||
103 | * | ||
104 | * returns SUCCESS if the Status bit was found or else ERR_MBX | ||
105 | **/ | ||
106 | s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) | ||
107 | { | ||
108 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
109 | s32 ret_val = IXGBE_ERR_MBX; | ||
110 | |||
111 | if (mbx->ops.check_for_ack) | ||
112 | ret_val = mbx->ops.check_for_ack(hw, mbx_id); | ||
113 | |||
114 | return ret_val; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * ixgbe_check_for_rst - checks to see if other side has reset | ||
119 | * @hw: pointer to the HW structure | ||
120 | * @mbx_id: id of mailbox to check | ||
121 | * | ||
122 | * returns SUCCESS if the Status bit was found or else ERR_MBX | ||
123 | **/ | ||
124 | s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) | ||
125 | { | ||
126 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
127 | s32 ret_val = IXGBE_ERR_MBX; | ||
128 | |||
129 | if (mbx->ops.check_for_rst) | ||
130 | ret_val = mbx->ops.check_for_rst(hw, mbx_id); | ||
131 | |||
132 | return ret_val; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * ixgbe_poll_for_msg - Wait for message notification | ||
137 | * @hw: pointer to the HW structure | ||
138 | * @mbx_id: id of mailbox to write | ||
139 | * | ||
140 | * returns SUCCESS if it successfully received a message notification | ||
141 | **/ | ||
142 | static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) | ||
143 | { | ||
144 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
145 | int countdown = mbx->timeout; | ||
146 | |||
147 | if (!countdown || !mbx->ops.check_for_msg) | ||
148 | goto out; | ||
149 | |||
150 | while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { | ||
151 | countdown--; | ||
152 | if (!countdown) | ||
153 | break; | ||
154 | udelay(mbx->usec_delay); | ||
155 | } | ||
156 | |||
157 | /* if we failed, all future posted messages fail until reset */ | ||
158 | if (!countdown) | ||
159 | mbx->timeout = 0; | ||
160 | out: | ||
161 | return countdown ? 0 : IXGBE_ERR_MBX; | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * ixgbe_poll_for_ack - Wait for message acknowledgement | ||
166 | * @hw: pointer to the HW structure | ||
167 | * @mbx_id: id of mailbox to write | ||
168 | * | ||
169 | * returns SUCCESS if it successfully received a message acknowledgement | ||
170 | **/ | ||
171 | static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) | ||
172 | { | ||
173 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
174 | int countdown = mbx->timeout; | ||
175 | |||
176 | if (!countdown || !mbx->ops.check_for_ack) | ||
177 | goto out; | ||
178 | |||
179 | while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { | ||
180 | countdown--; | ||
181 | if (!countdown) | ||
182 | break; | ||
183 | udelay(mbx->usec_delay); | ||
184 | } | ||
185 | |||
186 | /* if we failed, all future posted messages fail until reset */ | ||
187 | if (!countdown) | ||
188 | mbx->timeout = 0; | ||
189 | out: | ||
190 | return countdown ? 0 : IXGBE_ERR_MBX; | ||
191 | } | ||
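Both poll loops spin at most mbx->timeout times with mbx->usec_delay microseconds between probes, so the worst-case wait is their product; note that the PF-side init below (ixgbe_init_mbx_params_pf) leaves timeout at 0, which makes posted operations bail out immediately on the PF. With the VF-init constants from ixgbe_mbx.h the budget works out to one second:

    #include <stdio.h>

    #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* retries */
    #define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds per retry */

    int main(void)
    {
        unsigned long usec = (unsigned long)IXGBE_VF_MBX_INIT_TIMEOUT *
                             IXGBE_VF_MBX_INIT_DELAY;

        printf("worst-case mailbox wait: %lu us (%lu ms)\n", usec, usec / 1000);
        return 0;
    }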
192 | |||
193 | /** | ||
194 | * ixgbe_read_posted_mbx - Wait for message notification and receive message | ||
195 | * @hw: pointer to the HW structure | ||
196 | * @msg: The message buffer | ||
197 | * @size: Length of buffer | ||
198 | * @mbx_id: id of mailbox to write | ||
199 | * | ||
200 | * returns SUCCESS if it successfully received a message notification and | ||
201 | * copied it into the receive buffer. | ||
202 | **/ | ||
203 | s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) | ||
204 | { | ||
205 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
206 | s32 ret_val = IXGBE_ERR_MBX; | ||
207 | |||
208 | if (!mbx->ops.read) | ||
209 | goto out; | ||
210 | |||
211 | ret_val = ixgbe_poll_for_msg(hw, mbx_id); | ||
212 | |||
213 | /* if a message notification arrived, read it; otherwise we timed out */ | ||
214 | if (!ret_val) | ||
215 | ret_val = mbx->ops.read(hw, msg, size, mbx_id); | ||
216 | out: | ||
217 | return ret_val; | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack | ||
222 | * @hw: pointer to the HW structure | ||
223 | * @msg: The message buffer | ||
224 | * @size: Length of buffer | ||
225 | * @mbx_id: id of mailbox to write | ||
226 | * | ||
227 | * returns SUCCESS if it successfully copied message into the buffer and | ||
228 | * received an ack to that message within delay * timeout period | ||
229 | **/ | ||
230 | s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, | ||
231 | u16 mbx_id) | ||
232 | { | ||
233 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
234 | s32 ret_val = IXGBE_ERR_MBX; | ||
235 | |||
236 | /* exit if either we can't write or there isn't a defined timeout */ | ||
237 | if (!mbx->ops.write || !mbx->timeout) | ||
238 | goto out; | ||
239 | |||
240 | /* send msg */ | ||
241 | ret_val = mbx->ops.write(hw, msg, size, mbx_id); | ||
242 | |||
243 | /* if msg sent wait until we receive an ack */ | ||
244 | if (!ret_val) | ||
245 | ret_val = ixgbe_poll_for_ack(hw, mbx_id); | ||
246 | out: | ||
247 | return ret_val; | ||
248 | } | ||
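A hedged usage sketch, not part of the patch: posting a one-word control message to a VF and waiting for its ack. The function and constant come from this patch's own files; the wrapper name and VF index are illustrative:

    #include "ixgbe_type.h"
    #include "ixgbe_mbx.h"

    /* Returns 0 only if the write landed and the VF acked it within
     * timeout * usec_delay; on timeout the mailbox stays unusable
     * (timeout is forced to 0) until the next reset. */
    static s32 example_notify_vf(struct ixgbe_hw *hw, u16 vf)
    {
        u32 msg = IXGBE_PF_CONTROL_MSG;

        return ixgbe_write_posted_mbx(hw, &msg, 1, vf);
    }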
249 | |||
250 | /** | ||
251 | * ixgbe_init_mbx_ops_generic - Initialize MB function pointers | ||
252 | * @hw: pointer to the HW structure | ||
253 | * | ||
254 | * Setup the mailbox read and write message function pointers | ||
255 | **/ | ||
256 | void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) | ||
257 | { | ||
258 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
259 | |||
260 | mbx->ops.read_posted = ixgbe_read_posted_mbx; | ||
261 | mbx->ops.write_posted = ixgbe_write_posted_mbx; | ||
262 | } | ||
263 | |||
264 | static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) | ||
265 | { | ||
266 | u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); | ||
267 | s32 ret_val = IXGBE_ERR_MBX; | ||
268 | |||
269 | if (mbvficr & mask) { | ||
270 | ret_val = 0; | ||
271 | IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); | ||
272 | } | ||
273 | |||
274 | return ret_val; | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail | ||
279 | * @hw: pointer to the HW structure | ||
280 | * @vf_number: the VF index | ||
281 | * | ||
282 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX | ||
283 | **/ | ||
284 | static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) | ||
285 | { | ||
286 | s32 ret_val = IXGBE_ERR_MBX; | ||
287 | s32 index = IXGBE_MBVFICR_INDEX(vf_number); | ||
288 | u32 vf_bit = vf_number % 16; | ||
289 | |||
290 | if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, | ||
291 | index)) { | ||
292 | ret_val = 0; | ||
293 | hw->mbx.stats.reqs++; | ||
294 | } | ||
295 | |||
296 | return ret_val; | ||
297 | } | ||
298 | |||
299 | /** | ||
300 | * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed | ||
301 | * @hw: pointer to the HW structure | ||
302 | * @vf_number: the VF index | ||
303 | * | ||
304 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX | ||
305 | **/ | ||
306 | static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) | ||
307 | { | ||
308 | s32 ret_val = IXGBE_ERR_MBX; | ||
309 | s32 index = IXGBE_MBVFICR_INDEX(vf_number); | ||
310 | u32 vf_bit = vf_number % 16; | ||
311 | |||
312 | if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, | ||
313 | index)) { | ||
314 | ret_val = 0; | ||
315 | hw->mbx.stats.acks++; | ||
316 | } | ||
317 | |||
318 | return ret_val; | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * ixgbe_check_for_rst_pf - checks to see if the VF has reset | ||
323 | * @hw: pointer to the HW structure | ||
324 | * @vf_number: the VF index | ||
325 | * | ||
326 | * returns SUCCESS if the VF has set the Status bit or else ERR_MBX | ||
327 | **/ | ||
328 | static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) | ||
329 | { | ||
330 | u32 reg_offset = (vf_number < 32) ? 0 : 1; | ||
331 | u32 vf_shift = vf_number % 32; | ||
332 | u32 vflre = 0; | ||
333 | s32 ret_val = IXGBE_ERR_MBX; | ||
334 | |||
335 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
336 | vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); | ||
337 | |||
338 | if (vflre & (1 << vf_shift)) { | ||
339 | ret_val = 0; | ||
340 | IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); | ||
341 | hw->mbx.stats.rsts++; | ||
342 | } | ||
343 | |||
344 | return ret_val; | ||
345 | } | ||
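A worked example of the VFLRE indexing above: the 64 possible VFs are split across two 32-bit registers, with reg_offset selecting the register and vf_shift the bit within it:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vf;

        for (vf = 0; vf < 64; vf += 31) { /* samples VFs 0, 31, 62 */
            unsigned int reg_offset = (vf < 32) ? 0 : 1;
            unsigned int vf_shift = vf % 32;

            printf("vf %2u -> VFLRE(%u) bit %2u (mask 0x%08x)\n",
                   vf, reg_offset, vf_shift, 1u << vf_shift);
        }
        return 0;
    }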
346 | |||
347 | /** | ||
348 | * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock | ||
349 | * @hw: pointer to the HW structure | ||
350 | * @vf_number: the VF index | ||
351 | * | ||
352 | * return SUCCESS if we obtained the mailbox lock | ||
353 | **/ | ||
354 | static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) | ||
355 | { | ||
356 | s32 ret_val = IXGBE_ERR_MBX; | ||
357 | u32 p2v_mailbox; | ||
358 | |||
359 | /* Take ownership of the buffer */ | ||
360 | IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); | ||
361 | |||
362 | /* reserve mailbox for vf use */ | ||
363 | p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); | ||
364 | if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) | ||
365 | ret_val = 0; | ||
366 | |||
367 | return ret_val; | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * ixgbe_write_mbx_pf - Places a message in the mailbox | ||
372 | * @hw: pointer to the HW structure | ||
373 | * @msg: The message buffer | ||
374 | * @size: Length of buffer | ||
375 | * @vf_number: the VF index | ||
376 | * | ||
377 | * returns SUCCESS if it successfully copied message into the buffer | ||
378 | **/ | ||
379 | static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, | ||
380 | u16 vf_number) | ||
381 | { | ||
382 | s32 ret_val; | ||
383 | u16 i; | ||
384 | |||
385 | /* lock the mailbox to prevent pf/vf race condition */ | ||
386 | ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); | ||
387 | if (ret_val) | ||
388 | goto out_no_write; | ||
389 | |||
390 | /* flush msg and acks as we are overwriting the message buffer */ | ||
391 | ixgbe_check_for_msg_pf(hw, vf_number); | ||
392 | ixgbe_check_for_ack_pf(hw, vf_number); | ||
393 | |||
394 | /* copy the caller specified message to the mailbox memory buffer */ | ||
395 | for (i = 0; i < size; i++) | ||
396 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); | ||
397 | |||
398 | /* Interrupt VF to tell it a message has been sent and release buffer*/ | ||
399 | IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); | ||
400 | |||
401 | /* update stats */ | ||
402 | hw->mbx.stats.msgs_tx++; | ||
403 | |||
404 | out_no_write: | ||
405 | return ret_val; | ||
406 | |||
407 | } | ||
408 | |||
409 | /** | ||
410 | * ixgbe_read_mbx_pf - Read a message from the mailbox | ||
411 | * @hw: pointer to the HW structure | ||
412 | * @msg: The message buffer | ||
413 | * @size: Length of buffer | ||
414 | * @vf_number: the VF index | ||
415 | * | ||
416 | * This function copies a message from the mailbox buffer to the caller's | ||
417 | * memory buffer. The presumption is that the caller knows that there was | ||
418 | * a message due to a VF request so no polling for message is needed. | ||
419 | **/ | ||
420 | static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, | ||
421 | u16 vf_number) | ||
422 | { | ||
423 | s32 ret_val; | ||
424 | u16 i; | ||
425 | |||
426 | /* lock the mailbox to prevent pf/vf race condition */ | ||
427 | ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); | ||
428 | if (ret_val) | ||
429 | goto out_no_read; | ||
430 | |||
431 | /* copy the message to the mailbox memory buffer */ | ||
432 | for (i = 0; i < size; i++) | ||
433 | msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); | ||
434 | |||
435 | /* Acknowledge the message and release buffer */ | ||
436 | IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); | ||
437 | |||
438 | /* update stats */ | ||
439 | hw->mbx.stats.msgs_rx++; | ||
440 | |||
441 | out_no_read: | ||
442 | return ret_val; | ||
443 | } | ||
444 | |||
445 | /** | ||
446 | * ixgbe_init_mbx_params_pf - set initial values for pf mailbox | ||
447 | * @hw: pointer to the HW structure | ||
448 | * | ||
449 | * Initializes the hw->mbx struct to correct values for pf mailbox | ||
450 | */ | ||
451 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) | ||
452 | { | ||
453 | struct ixgbe_mbx_info *mbx = &hw->mbx; | ||
454 | |||
455 | if (hw->mac.type != ixgbe_mac_82599EB) | ||
456 | return; | ||
457 | |||
458 | mbx->timeout = 0; | ||
459 | mbx->usec_delay = 0; | ||
460 | |||
461 | mbx->size = IXGBE_VFMAILBOX_SIZE; | ||
462 | |||
463 | mbx->stats.msgs_tx = 0; | ||
464 | mbx->stats.msgs_rx = 0; | ||
465 | mbx->stats.reqs = 0; | ||
466 | mbx->stats.acks = 0; | ||
467 | mbx->stats.rsts = 0; | ||
468 | } | ||
469 | |||
470 | struct ixgbe_mbx_operations mbx_ops_82599 = { | ||
471 | .read = ixgbe_read_mbx_pf, | ||
472 | .write = ixgbe_write_mbx_pf, | ||
473 | .read_posted = ixgbe_read_posted_mbx, | ||
474 | .write_posted = ixgbe_write_posted_mbx, | ||
475 | .check_for_msg = ixgbe_check_for_msg_pf, | ||
476 | .check_for_ack = ixgbe_check_for_ack_pf, | ||
477 | .check_for_rst = ixgbe_check_for_rst_pf, | ||
478 | }; | ||
479 | |||
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h new file mode 100644 index 00000000000..be7ab3309ab --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_mbx.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _IXGBE_MBX_H_ | ||
29 | #define _IXGBE_MBX_H_ | ||
30 | |||
31 | #include "ixgbe_type.h" | ||
32 | |||
33 | #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | ||
34 | #define IXGBE_ERR_MBX -100 | ||
35 | |||
36 | #define IXGBE_VFMAILBOX 0x002FC | ||
37 | #define IXGBE_VFMBMEM 0x00200 | ||
38 | |||
39 | #define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x)) | ||
40 | #define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn)) | ||
41 | |||
42 | #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ | ||
43 | #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ | ||
44 | #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | ||
45 | #define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | ||
46 | #define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ | ||
47 | |||
48 | #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ | ||
49 | #define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ | ||
50 | #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ | ||
51 | #define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ | ||
52 | |||
53 | |||
54 | /* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the | ||
55 | * PF. The reverse is true if it is IXGBE_PF_*. | ||
56 | * Message ACK's are the value or'd with 0xF0000000 | ||
57 | */ | ||
58 | #define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | ||
59 | * this are the ACK */ | ||
60 | #define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | ||
61 | * this are the NACK */ | ||
62 | #define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | ||
63 | clear to send requests */ | ||
64 | #define IXGBE_VT_MSGINFO_SHIFT 16 | ||
65 | /* bits 23:16 are used for extra info for certain messages */ | ||
66 | #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) | ||
67 | |||
68 | #define IXGBE_VF_RESET 0x01 /* VF requests reset */ | ||
69 | #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | ||
70 | #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | ||
71 | #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | ||
72 | #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | ||
73 | |||
74 | /* length of permanent address message returned from PF */ | ||
75 | #define IXGBE_VF_PERMADDR_MSG_LEN 4 | ||
76 | /* word in permanent address message with the current multicast type */ | ||
77 | #define IXGBE_VF_MC_TYPE_WORD 3 | ||
78 | |||
79 | #define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ | ||
80 | |||
81 | #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ | ||
82 | #define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ | ||
83 | |||
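How a mailbox word is assembled and decoded from these constants, as a standalone sketch: the low byte carries the message type, bits 23:16 carry the extra info (here, hypothetically, a multicast entry count), and the PF stamps ACK or NACK into the top bits on reply:

    #include <stdio.h>

    #define IXGBE_VT_MSGTYPE_ACK   0x80000000u
    #define IXGBE_VT_MSGTYPE_NACK  0x40000000u
    #define IXGBE_VT_MSGINFO_SHIFT 16
    #define IXGBE_VT_MSGINFO_MASK  (0xFFu << IXGBE_VT_MSGINFO_SHIFT)
    #define IXGBE_VF_SET_MULTICAST 0x03u

    int main(void)
    {
        unsigned int entries = 5; /* hypothetical MC address count */
        unsigned int msg = IXGBE_VF_SET_MULTICAST |
                           (entries << IXGBE_VT_MSGINFO_SHIFT);
        unsigned int reply = msg | IXGBE_VT_MSGTYPE_ACK;

        printf("request 0x%08x: type 0x%02x, info %u\n", msg, msg & 0xFF,
               (msg & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT);
        printf("reply   0x%08x: %s\n", reply,
               (reply & IXGBE_VT_MSGTYPE_NACK) ? "NACK" : "ACK");
        return 0;
    }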
84 | s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
85 | s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
86 | s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
87 | s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); | ||
88 | s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); | ||
89 | s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); | ||
90 | s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); | ||
91 | void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); | ||
92 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); | ||
93 | |||
94 | extern struct ixgbe_mbx_operations mbx_ops_82599; | ||
95 | |||
96 | #endif /* _IXGBE_MBX_H_ */ | ||
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c new file mode 100644 index 00000000000..d4cd20f3019 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | |||
29 | #include <linux/types.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/vmalloc.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/in.h> | ||
36 | #include <linux/ip.h> | ||
37 | #include <linux/tcp.h> | ||
38 | #include <linux/ipv6.h> | ||
39 | #ifdef NETIF_F_HW_VLAN_TX | ||
40 | #include <linux/if_vlan.h> | ||
41 | #endif | ||
42 | |||
43 | #include "ixgbe.h" | ||
44 | |||
45 | #include "ixgbe_sriov.h" | ||
46 | |||
47 | int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, | ||
48 | int entries, u16 *hash_list, u32 vf) | ||
49 | { | ||
50 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; | ||
51 | int i; | ||
52 | |||
53 | /* only so many hash values supported */ | ||
54 | entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); | ||
55 | |||
56 | /* | ||
57 | * salt away the number of multicast addresses assigned | ||
58 | * to this VF for later use to restore when the PF multicast | ||
59 | * list changes | ||
60 | */ | ||
61 | vfinfo->num_vf_mc_hashes = entries; | ||
62 | |||
63 | /* | ||
64 | * VFs are limited to using the MTA hash table for their multicast | ||
65 | * addresses | ||
66 | */ | ||
67 | for (i = 0; i < entries; i++) { | ||
68 | vfinfo->vf_mc_hashes[i] = hash_list[i]; | ||
69 | } | ||
70 | |||
71 | /* Flush and reset the mta with the new values */ | ||
72 | ixgbe_set_rx_mode(adapter->netdev); | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) | ||
78 | { | ||
79 | struct ixgbe_hw *hw = &adapter->hw; | ||
80 | struct vf_data_storage *vfinfo; | ||
81 | int i, j; | ||
82 | u32 vector_bit; | ||
83 | u32 vector_reg; | ||
84 | u32 mta_reg; | ||
85 | |||
86 | for (i = 0; i < adapter->num_vfs; i++) { | ||
87 | vfinfo = &adapter->vfinfo[i]; | ||
88 | for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { | ||
89 | hw->addr_ctrl.mta_in_use++; | ||
90 | vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; | ||
91 | vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; | ||
92 | mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); | ||
93 | mta_reg |= (1 << vector_bit); | ||
94 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); | ||
95 | } | ||
96 | } | ||
97 | } | ||
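A worked example of the MTA indexing above: the stored 12-bit multicast hash splits into a 7-bit register selector (128 MTA registers) and a 5-bit bit position within the chosen 32-bit register; the hash value is hypothetical:

    #include <stdio.h>

    int main(void)
    {
        unsigned short hash = 0x0b52; /* hypothetical stored vf_mc_hash */
        unsigned int vector_reg = (hash >> 5) & 0x7F;
        unsigned int vector_bit = hash & 0x1F;

        printf("hash 0x%03x -> MTA[%u] |= 1 << %u\n",
               hash, vector_reg, vector_bit);
        return 0;
    }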
98 | |||
99 | int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) | ||
100 | { | ||
101 | u32 ctrl; | ||
102 | |||
103 | /* Check if global VLAN already set, if not set it */ | ||
104 | ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); | ||
105 | if (!(ctrl & IXGBE_VLNCTRL_VFE)) { | ||
106 | /* enable VLAN tag insert/strip */ | ||
107 | ctrl |= IXGBE_VLNCTRL_VFE; | ||
108 | ctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
109 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); | ||
110 | } | ||
111 | |||
112 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | ||
113 | } | ||
114 | |||
115 | |||
116 | void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) | ||
117 | { | ||
118 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); | ||
119 | vmolr |= (IXGBE_VMOLR_AUPE | | ||
120 | IXGBE_VMOLR_ROMPE | | ||
121 | IXGBE_VMOLR_ROPE | | ||
122 | IXGBE_VMOLR_BAM); | ||
123 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); | ||
124 | } | ||
125 | |||
126 | inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) | ||
127 | { | ||
128 | struct ixgbe_hw *hw = &adapter->hw; | ||
129 | |||
130 | /* reset offloads to defaults */ | ||
131 | ixgbe_set_vmolr(hw, vf); | ||
132 | |||
133 | |||
134 | /* reset multicast table array for vf */ | ||
135 | adapter->vfinfo[vf].num_vf_mc_hashes = 0; | ||
136 | |||
137 | /* Flush and reset the mta with the new values */ | ||
138 | ixgbe_set_rx_mode(adapter->netdev); | ||
139 | |||
140 | if (adapter->vfinfo[vf].rar > 0) { | ||
141 | adapter->hw.mac.ops.clear_rar(&adapter->hw, | ||
142 | adapter->vfinfo[vf].rar); | ||
143 | adapter->vfinfo[vf].rar = -1; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | ||
148 | int vf, unsigned char *mac_addr) | ||
149 | { | ||
150 | struct ixgbe_hw *hw = &adapter->hw; | ||
151 | |||
152 | adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr, | ||
153 | vf, IXGBE_RAH_AV); | ||
154 | if (adapter->vfinfo[vf].rar < 0) { | ||
155 | DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf); | ||
156 | return -1; | ||
157 | } | ||
158 | |||
159 | memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | ||
165 | { | ||
166 | unsigned char vf_mac_addr[ETH_ALEN]; | ||
167 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
168 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
169 | unsigned int vfn = (event_mask & 0x3f); | ||
170 | |||
171 | bool enable = ((event_mask & 0x10000000U) != 0); | ||
172 | |||
173 | if (enable) { | ||
174 | random_ether_addr(vf_mac_addr); | ||
175 | DPRINTK(PROBE, INFO, "IOV: VF %d is enabled, " | ||
176 | "MAC %02X:%02X:%02X:%02X:%02X:%02X\n", | ||
177 | vfn, | ||
178 | vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2], | ||
179 | vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]); | ||
180 | /* | ||
181 | * Store away the VF's "permanent" MAC address; the VF will | ||
182 | * ask for it later. | ||
183 | */ | ||
184 | memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, ETH_ALEN); | ||
185 | } | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
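A hedged decode of the event_mask layout the function above assumes: the low 6 bits carry the VF number and bit 28 the enable flag. The input value here is purely illustrative:

	/* Illustrative decode of a hypothetical event_mask value */
	unsigned int event_mask = 0x10000005;          /* enable VF 5 */
	unsigned int vfn = event_mask & 0x3f;          /* -> 5 */
	bool enable = (event_mask & 0x10000000U) != 0; /* -> true */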
190 | inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) | ||
191 | { | ||
192 | struct ixgbe_hw *hw = &adapter->hw; | ||
193 | u32 reg; | ||
194 | u32 reg_offset, vf_shift; | ||
195 | |||
196 | vf_shift = vf % 32; | ||
197 | reg_offset = vf / 32; | ||
198 | |||
199 | /* enable transmit and receive for vf */ | ||
200 | reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); | ||
201 | reg |= (1 << vf_shift); | ||
202 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); | ||
203 | |||
204 | reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); | ||
205 | reg |= (1 << vf_shift); | ||
206 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); | ||
207 | |||
208 | ixgbe_vf_reset_event(adapter, vf); | ||
209 | } | ||
210 | |||
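The per-VF transmit/receive enables are spread across two 32-bit VFTE/VFRE registers, so the VF index splits into a register offset and a bit position. A worked example for a hypothetical VF 37:

	u32 vf = 37;              /* hypothetical VF index */
	u32 vf_shift = vf % 32;   /* -> 5: bit 5 ... */
	u32 reg_offset = vf / 32; /* -> 1: ... of VFTE(1) and VFRE(1) */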
211 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) | ||
212 | { | ||
213 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; | ||
214 | u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; | ||
215 | struct ixgbe_hw *hw = &adapter->hw; | ||
216 | s32 retval; | ||
217 | int entries; | ||
218 | u16 *hash_list; | ||
219 | int add, vid; | ||
220 | |||
221 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); | ||
222 | |||
223 | if (retval) | ||
224 | printk(KERN_ERR "Error receiving message from VF %u\n", vf); | ||
225 | |||
226 | /* this is a message we already processed, do nothing */ | ||
227 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) | ||
228 | return retval; | ||
229 | |||
230 | /* | ||
231 | * Until the VF completes a virtual function reset it must not | ||
232 | * be allowed to start any configuration. | ||
233 | */ | ||
234 | |||
235 | if (msgbuf[0] == IXGBE_VF_RESET) { | ||
236 | unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; | ||
237 | u8 *addr = (u8 *)(&msgbuf[1]); | ||
238 | DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf); | ||
239 | adapter->vfinfo[vf].clear_to_send = false; | ||
240 | ixgbe_vf_reset_msg(adapter, vf); | ||
241 | adapter->vfinfo[vf].clear_to_send = true; | ||
242 | |||
243 | /* reply to reset with ack and vf mac address */ | ||
244 | msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; | ||
245 | memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); | ||
246 | /* | ||
247 | * Piggyback the multicast filter type so VF can compute the | ||
248 | * correct vectors | ||
249 | */ | ||
250 | msgbuf[3] = hw->mac.mc_filter_type; | ||
251 | ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); | ||
252 | |||
253 | return retval; | ||
254 | } | ||
255 | |||
256 | if (!adapter->vfinfo[vf].clear_to_send) { | ||
257 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; | ||
258 | ixgbe_write_mbx(hw, msgbuf, 1, vf); | ||
259 | return retval; | ||
260 | } | ||
261 | |||
262 | switch (msgbuf[0] & 0xFFFF) { | ||
263 | case IXGBE_VF_SET_MAC_ADDR: | ||
264 | { | ||
265 | u8 *new_mac = ((u8 *)(&msgbuf[1])); | ||
266 | if (is_valid_ether_addr(new_mac)) | ||
267 | ixgbe_set_vf_mac(adapter, vf, new_mac); | ||
268 | else | ||
269 | retval = -1; | ||
270 | } | ||
271 | break; | ||
272 | case IXGBE_VF_SET_MULTICAST: | ||
273 | entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) | ||
274 | >> IXGBE_VT_MSGINFO_SHIFT; | ||
275 | hash_list = (u16 *)&msgbuf[1]; | ||
276 | retval = ixgbe_set_vf_multicasts(adapter, entries, | ||
277 | hash_list, vf); | ||
278 | break; | ||
279 | case IXGBE_VF_SET_LPE: | ||
280 | WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); /* VF jumbo frames not supported yet */ | ||
281 | break; | ||
282 | case IXGBE_VF_SET_VLAN: | ||
283 | add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) | ||
284 | >> IXGBE_VT_MSGINFO_SHIFT; | ||
285 | vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); | ||
286 | retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); | ||
287 | break; | ||
288 | default: | ||
289 | DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]); | ||
290 | retval = IXGBE_ERR_MBX; | ||
291 | break; | ||
292 | } | ||
293 | |||
294 | /* notify the VF of the results of what it sent us */ | ||
295 | if (retval) | ||
296 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; | ||
297 | else | ||
298 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; | ||
299 | |||
300 | msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; | ||
301 | |||
302 | ixgbe_write_mbx(hw, msgbuf, 1, vf); | ||
303 | |||
304 | return retval; | ||
305 | } | ||
306 | |||
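For context, a hedged sketch of the VF side of this exchange. Only the IXGBE_VF_* constants and the mbx.ops table come from this patch; the surrounding function is an assumption, not actual ixgbevf code:

	/* Hypothetical VF-side sender: asks the PF to program a MAC filter.
	 * The PF path above replies with msgbuf[0] | IXGBE_VT_MSGTYPE_ACK on
	 * success, or IXGBE_VT_MSGTYPE_NACK on failure. */
	static s32 vf_request_mac(struct ixgbe_hw *hw, u8 *mac)
	{
		u32 msgbuf[3] = { 0 };

		msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
		memcpy(&msgbuf[1], mac, 6);	/* MAC in words 1-2 */
		/* posted write: waits for the PF's ACK/NACK reply */
		return hw->mbx.ops.write_posted(hw, msgbuf, 3, 0);
	}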
307 | static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) | ||
308 | { | ||
309 | struct ixgbe_hw *hw = &adapter->hw; | ||
310 | u32 msg = IXGBE_VT_MSGTYPE_NACK; | ||
311 | |||
312 | /* if device isn't clear to send it shouldn't be reading either */ | ||
313 | if (!adapter->vfinfo[vf].clear_to_send) | ||
314 | ixgbe_write_mbx(hw, &msg, 1, vf); | ||
315 | } | ||
316 | |||
317 | void ixgbe_msg_task(struct ixgbe_adapter *adapter) | ||
318 | { | ||
319 | struct ixgbe_hw *hw = &adapter->hw; | ||
320 | u32 vf; | ||
321 | |||
322 | for (vf = 0; vf < adapter->num_vfs; vf++) { | ||
323 | /* process any reset requests */ | ||
324 | if (!ixgbe_check_for_rst(hw, vf)) | ||
325 | ixgbe_vf_reset_event(adapter, vf); | ||
326 | |||
327 | /* process any messages pending */ | ||
328 | if (!ixgbe_check_for_msg(hw, vf)) | ||
329 | ixgbe_rcv_msg_from_vf(adapter, vf); | ||
330 | |||
331 | /* process any acks */ | ||
332 | if (!ixgbe_check_for_ack(hw, vf)) | ||
333 | ixgbe_rcv_ack_from_vf(adapter, vf); | ||
334 | } | ||
335 | } | ||
336 | |||
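ixgbe_msg_task() is presumably driven from the PF's service path when a mailbox interrupt fires; a minimal sketch of that hookup, assuming the real wiring lives in the ixgbe_main.c hunk of this patch (not reproduced here):

	/* hypothetical placement inside the PF's service/interrupt handling */
	if (adapter->num_vfs)
		ixgbe_msg_task(adapter);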
337 | void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) | ||
338 | { | ||
339 | struct ixgbe_hw *hw = &adapter->hw; | ||
340 | |||
341 | /* disable transmit and receive for all vfs */ | ||
342 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); | ||
343 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); | ||
344 | |||
345 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); | ||
346 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); | ||
347 | } | ||
348 | |||
349 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) | ||
350 | { | ||
351 | struct ixgbe_hw *hw = &adapter->hw; | ||
352 | u32 ping; | ||
353 | int i; | ||
354 | |||
355 | for (i = 0; i < adapter->num_vfs; i++) { | ||
356 | ping = IXGBE_PF_CONTROL_MSG; | ||
357 | if (adapter->vfinfo[i].clear_to_send) | ||
358 | ping |= IXGBE_VT_MSGTYPE_CTS; | ||
359 | ixgbe_write_mbx(hw, &ping, 1, i); | ||
360 | } | ||
361 | } | ||
362 | |||
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h new file mode 100644 index 00000000000..51d1106c45a --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_sriov.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2009 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #ifndef _IXGBE_SRIOV_H_ | ||
29 | #define _IXGBE_SRIOV_H_ | ||
30 | |||
31 | int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, | ||
32 | int entries, u16 *hash_list, u32 vf); | ||
33 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); | ||
34 | int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); | ||
35 | void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); | ||
36 | void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); | ||
37 | void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); | ||
38 | void ixgbe_msg_task(struct ixgbe_adapter *adapter); | ||
39 | int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | ||
40 | int vf, unsigned char *mac_addr); | ||
41 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); | ||
42 | void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); | ||
43 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); | ||
44 | void ixgbe_dump_registers(struct ixgbe_adapter *adapter); | ||
45 | |||
46 | #endif /* _IXGBE_SRIOV_H_ */ | ||
47 | |||
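Taken together, the PF enables the virtual functions through the standard PCI SR-IOV API and then services them via the routines above. A hedged sketch of the probe-time bring-up; the actual code is in the ixgbe_main.c hunk of this patch, not shown in this excerpt:

	/* hypothetical probe-time enable, assuming adapter->num_vfs was
	 * validated against IXGBE_MAX_VF_FUNCTIONS beforehand */
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		DPRINTK(PROBE, ERR, "Failed to enable SR-IOV: %d\n", err);
		adapter->num_vfs = 0;	/* fall back to non-IOV operation */
	}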
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 9eafddfa1b9..2be90746659 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/mdio.h> | 32 | #include <linux/mdio.h> |
33 | #include <linux/list.h> | 33 | #include <linux/netdevice.h> |
34 | 34 | ||
35 | /* Vendor ID */ | 35 | /* Vendor ID */ |
36 | #define IXGBE_INTEL_VENDOR_ID 0x8086 | 36 | #define IXGBE_INTEL_VENDOR_ID 0x8086 |
@@ -277,6 +277,7 @@ | |||
277 | #define IXGBE_DTXCTL 0x07E00 | 277 | #define IXGBE_DTXCTL 0x07E00 |
278 | 278 | ||
279 | #define IXGBE_DMATXCTL 0x04A80 | 279 | #define IXGBE_DMATXCTL 0x04A80 |
280 | #define IXGBE_PFDTXGSWC 0x08220 | ||
280 | #define IXGBE_DTXMXSZRQ 0x08100 | 281 | #define IXGBE_DTXMXSZRQ 0x08100 |
281 | #define IXGBE_DTXTCPFLGL 0x04A88 | 282 | #define IXGBE_DTXTCPFLGL 0x04A88 |
282 | #define IXGBE_DTXTCPFLGH 0x04A8C | 283 | #define IXGBE_DTXTCPFLGH 0x04A8C |
@@ -287,6 +288,8 @@ | |||
287 | #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ | 288 | #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ |
288 | #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ | 289 | #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ |
289 | #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ | 290 | #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ |
291 | |||
292 | #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ | ||
290 | #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ | 293 | #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ |
291 | /* Tx DCA Control register : 128 of these (0-127) */ | 294 | /* Tx DCA Control register : 128 of these (0-127) */ |
292 | #define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) | 295 | #define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) |
@@ -497,6 +500,7 @@ | |||
497 | /* DCB registers */ | 500 | /* DCB registers */ |
498 | #define IXGBE_RTRPCS 0x02430 | 501 | #define IXGBE_RTRPCS 0x02430 |
499 | #define IXGBE_RTTDCS 0x04900 | 502 | #define IXGBE_RTTDCS 0x04900 |
503 | #define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ | ||
500 | #define IXGBE_RTTPCS 0x0CD00 | 504 | #define IXGBE_RTTPCS 0x0CD00 |
501 | #define IXGBE_RTRUP2TC 0x03020 | 505 | #define IXGBE_RTRUP2TC 0x03020 |
502 | #define IXGBE_RTTUP2TC 0x0C800 | 506 | #define IXGBE_RTTUP2TC 0x0C800 |
@@ -730,6 +734,13 @@ | |||
730 | #define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 | 734 | #define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 |
731 | #define IXGBE_GCR_CAP_VER2 0x00040000 | 735 | #define IXGBE_GCR_CAP_VER2 0x00040000 |
732 | 736 | ||
737 | #define IXGBE_GCR_EXT_MSIX_EN 0x80000000 | ||
738 | #define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 | ||
739 | #define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 | ||
740 | #define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 | ||
741 | #define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ | ||
742 | IXGBE_GCR_EXT_VT_MODE_64) | ||
743 | |||
733 | /* Time Sync Registers */ | 744 | /* Time Sync Registers */ |
734 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ | 745 | #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ |
735 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ | 746 | #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ |
@@ -1065,6 +1076,8 @@ | |||
1065 | /* VFRE bitmask */ | 1076 | /* VFRE bitmask */ |
1066 | #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF | 1077 | #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF |
1067 | 1078 | ||
1079 | #define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ | ||
1080 | |||
1068 | /* RDHMPN and TDHMPN bitmasks */ | 1081 | /* RDHMPN and TDHMPN bitmasks */ |
1069 | #define IXGBE_RDHMPN_RDICADDR 0x007FF800 | 1082 | #define IXGBE_RDHMPN_RDICADDR 0x007FF800 |
1070 | #define IXGBE_RDHMPN_RDICRDREQ 0x00800000 | 1083 | #define IXGBE_RDHMPN_RDICRDREQ 0x00800000 |
@@ -1295,6 +1308,7 @@ | |||
1295 | /* VLAN pool filtering masks */ | 1308 | /* VLAN pool filtering masks */ |
1296 | #define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ | 1309 | #define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ |
1297 | #define IXGBE_VLVF_ENTRIES 64 | 1310 | #define IXGBE_VLVF_ENTRIES 64 |
1311 | #define IXGBE_VLVF_VLANID_MASK 0x00000FFF | ||
1298 | 1312 | ||
1299 | #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ | 1313 | #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ |
1300 | 1314 | ||
@@ -1843,6 +1857,12 @@ | |||
1843 | #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ | 1857 | #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ |
1844 | #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT | 1858 | #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT |
1845 | 1859 | ||
1860 | /* SR-IOV specific macros */ | ||
1861 | #define IXGBE_MBVFICR_INDEX(vf_number) ((vf_number) >> 4) | ||
1862 | #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) | ||
1863 | #define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) | ||
1864 | #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) | ||
1865 | |||
1846 | /* Little Endian defines */ | 1866 | /* Little Endian defines */ |
1847 | #ifndef __le32 | 1867 | #ifndef __le32 |
1848 | #define __le32 u32 | 1868 | #define __le32 u32 |
@@ -2109,6 +2129,15 @@ struct ixgbe_atr_input { | |||
2109 | u8 byte_stream[42]; | 2129 | u8 byte_stream[42]; |
2110 | }; | 2130 | }; |
2111 | 2131 | ||
2132 | struct ixgbe_atr_input_masks { | ||
2133 | u32 src_ip_mask; | ||
2134 | u32 dst_ip_mask; | ||
2135 | u16 src_port_mask; | ||
2136 | u16 dst_port_mask; | ||
2137 | u16 vlan_id_mask; | ||
2138 | u16 data_mask; | ||
2139 | }; | ||
2140 | |||
2112 | enum ixgbe_eeprom_type { | 2141 | enum ixgbe_eeprom_type { |
2113 | ixgbe_eeprom_uninitialized = 0, | 2142 | ixgbe_eeprom_uninitialized = 0, |
2114 | ixgbe_eeprom_spi, | 2143 | ixgbe_eeprom_spi, |
@@ -2385,7 +2414,7 @@ struct ixgbe_mac_operations { | |||
2385 | s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); | 2414 | s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); |
2386 | s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); | 2415 | s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); |
2387 | s32 (*init_rx_addrs)(struct ixgbe_hw *); | 2416 | s32 (*init_rx_addrs)(struct ixgbe_hw *); |
2388 | s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *); | 2417 | s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); |
2389 | s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, | 2418 | s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, |
2390 | ixgbe_mc_addr_itr); | 2419 | ixgbe_mc_addr_itr); |
2391 | s32 (*enable_mc)(struct ixgbe_hw *); | 2420 | s32 (*enable_mc)(struct ixgbe_hw *); |
@@ -2463,6 +2492,37 @@ struct ixgbe_phy_info { | |||
2463 | bool multispeed_fiber; | 2492 | bool multispeed_fiber; |
2464 | }; | 2493 | }; |
2465 | 2494 | ||
2495 | #include "ixgbe_mbx.h" | ||
2496 | |||
2497 | struct ixgbe_mbx_operations { | ||
2498 | s32 (*init_params)(struct ixgbe_hw *hw); | ||
2499 | s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); | ||
2500 | s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); | ||
2501 | s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); | ||
2502 | s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); | ||
2503 | s32 (*check_for_msg)(struct ixgbe_hw *, u16); | ||
2504 | s32 (*check_for_ack)(struct ixgbe_hw *, u16); | ||
2505 | s32 (*check_for_rst)(struct ixgbe_hw *, u16); | ||
2506 | }; | ||
2507 | |||
2508 | struct ixgbe_mbx_stats { | ||
2509 | u32 msgs_tx; | ||
2510 | u32 msgs_rx; | ||
2511 | |||
2512 | u32 acks; | ||
2513 | u32 reqs; | ||
2514 | u32 rsts; | ||
2515 | }; | ||
2516 | |||
2517 | struct ixgbe_mbx_info { | ||
2518 | struct ixgbe_mbx_operations ops; | ||
2519 | struct ixgbe_mbx_stats stats; | ||
2520 | u32 timeout; | ||
2521 | u32 usec_delay; | ||
2522 | u32 v2p_mailbox; | ||
2523 | u16 size; | ||
2524 | }; | ||
2525 | |||
2466 | struct ixgbe_hw { | 2526 | struct ixgbe_hw { |
2467 | u8 __iomem *hw_addr; | 2527 | u8 __iomem *hw_addr; |
2468 | void *back; | 2528 | void *back; |
@@ -2472,6 +2532,7 @@ struct ixgbe_hw { | |||
2472 | struct ixgbe_phy_info phy; | 2532 | struct ixgbe_phy_info phy; |
2473 | struct ixgbe_eeprom_info eeprom; | 2533 | struct ixgbe_eeprom_info eeprom; |
2474 | struct ixgbe_bus_info bus; | 2534 | struct ixgbe_bus_info bus; |
2535 | struct ixgbe_mbx_info mbx; | ||
2475 | u16 device_id; | 2536 | u16 device_id; |
2476 | u16 vendor_id; | 2537 | u16 vendor_id; |
2477 | u16 subsystem_device_id; | 2538 | u16 subsystem_device_id; |
@@ -2486,6 +2547,7 @@ struct ixgbe_info { | |||
2486 | struct ixgbe_mac_operations *mac_ops; | 2547 | struct ixgbe_mac_operations *mac_ops; |
2487 | struct ixgbe_eeprom_operations *eeprom_ops; | 2548 | struct ixgbe_eeprom_operations *eeprom_ops; |
2488 | struct ixgbe_phy_operations *phy_ops; | 2549 | struct ixgbe_phy_operations *phy_ops; |
2550 | struct ixgbe_mbx_operations *mbx_ops; | ||
2489 | }; | 2551 | }; |
2490 | 2552 | ||
2491 | 2553 | ||