diff options
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/defines.h | 128 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/ethtool.c | 73 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/igbvf.h | 103 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/mbx.c | 21 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/mbx.h | 53 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/netdev.c | 389 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/regs.h | 123 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/vf.c | 43 | ||||
-rw-r--r-- | drivers/net/ethernet/intel/igbvf/vf.h | 83 |
9 files changed, 500 insertions, 516 deletions
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999b1685..ae3f28332fa0 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -29,94 +28,93 @@ | |||
29 | #define _E1000_DEFINES_H_ | 28 | #define _E1000_DEFINES_H_ |
30 | 29 | ||
31 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | 30 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ |
32 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 | 31 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 |
33 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 | 32 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 |
34 | 33 | ||
35 | /* IVAR valid bit */ | 34 | /* IVAR valid bit */ |
36 | #define E1000_IVAR_VALID 0x80 | 35 | #define E1000_IVAR_VALID 0x80 |
37 | 36 | ||
38 | /* Receive Descriptor bit definitions */ | 37 | /* Receive Descriptor bit definitions */ |
39 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ | 38 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ |
40 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ | 39 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ |
41 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | 40 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ |
42 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | 41 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ |
43 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ | 42 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ |
44 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ | 43 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ |
45 | #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ | 44 | #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ |
46 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ | 45 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ |
47 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | 46 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ |
48 | 47 | ||
49 | #define E1000_RXDEXT_STATERR_LB 0x00040000 | 48 | #define E1000_RXDEXT_STATERR_LB 0x00040000 |
50 | #define E1000_RXDEXT_STATERR_CE 0x01000000 | 49 | #define E1000_RXDEXT_STATERR_CE 0x01000000 |
51 | #define E1000_RXDEXT_STATERR_SE 0x02000000 | 50 | #define E1000_RXDEXT_STATERR_SE 0x02000000 |
52 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 | 51 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 |
53 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 | 52 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 |
54 | #define E1000_RXDEXT_STATERR_TCPE 0x20000000 | 53 | #define E1000_RXDEXT_STATERR_TCPE 0x20000000 |
55 | #define E1000_RXDEXT_STATERR_IPE 0x40000000 | 54 | #define E1000_RXDEXT_STATERR_IPE 0x40000000 |
56 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 | 55 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 |
57 | |||
58 | 56 | ||
59 | /* Same mask, but for extended and packet split descriptors */ | 57 | /* Same mask, but for extended and packet split descriptors */ |
60 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ | 58 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ |
61 | E1000_RXDEXT_STATERR_CE | \ | 59 | E1000_RXDEXT_STATERR_CE | \ |
62 | E1000_RXDEXT_STATERR_SE | \ | 60 | E1000_RXDEXT_STATERR_SE | \ |
63 | E1000_RXDEXT_STATERR_SEQ | \ | 61 | E1000_RXDEXT_STATERR_SEQ | \ |
64 | E1000_RXDEXT_STATERR_CXE | \ | 62 | E1000_RXDEXT_STATERR_CXE | \ |
65 | E1000_RXDEXT_STATERR_RXE) | 63 | E1000_RXDEXT_STATERR_RXE) |
66 | 64 | ||
67 | /* Device Control */ | 65 | /* Device Control */ |
68 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | 66 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ |
69 | 67 | ||
70 | /* Device Status */ | 68 | /* Device Status */ |
71 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | 69 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ |
72 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ | 70 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ |
73 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ | 71 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ |
74 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | 72 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ |
75 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | 73 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ |
76 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | 74 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ |
77 | 75 | ||
78 | #define SPEED_10 10 | 76 | #define SPEED_10 10 |
79 | #define SPEED_100 100 | 77 | #define SPEED_100 100 |
80 | #define SPEED_1000 1000 | 78 | #define SPEED_1000 1000 |
81 | #define HALF_DUPLEX 1 | 79 | #define HALF_DUPLEX 1 |
82 | #define FULL_DUPLEX 2 | 80 | #define FULL_DUPLEX 2 |
83 | 81 | ||
84 | /* Transmit Descriptor bit definitions */ | 82 | /* Transmit Descriptor bit definitions */ |
85 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | 83 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ |
86 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | 84 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ |
87 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | 85 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ |
88 | #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | 86 | #define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ |
89 | 87 | ||
90 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 | 88 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 |
91 | 89 | ||
92 | /* 802.1q VLAN Packet Size */ | 90 | /* 802.1q VLAN Packet Size */ |
93 | #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ | 91 | #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ |
94 | 92 | ||
95 | /* Error Codes */ | 93 | /* Error Codes */ |
96 | #define E1000_SUCCESS 0 | 94 | #define E1000_SUCCESS 0 |
97 | #define E1000_ERR_CONFIG 3 | 95 | #define E1000_ERR_CONFIG 3 |
98 | #define E1000_ERR_MAC_INIT 5 | 96 | #define E1000_ERR_MAC_INIT 5 |
99 | #define E1000_ERR_MBX 15 | 97 | #define E1000_ERR_MBX 15 |
100 | 98 | ||
101 | /* SRRCTL bit definitions */ | 99 | /* SRRCTL bit definitions */ |
102 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ | 100 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ |
103 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 | 101 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 |
104 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ | 102 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ |
105 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 | 103 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 |
106 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 | 104 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 |
107 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 | 105 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 |
108 | #define E1000_SRRCTL_DROP_EN 0x80000000 | 106 | #define E1000_SRRCTL_DROP_EN 0x80000000 |
109 | 107 | ||
110 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F | 108 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F |
111 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 | 109 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 |
112 | 110 | ||
113 | /* Additional Descriptor Control definitions */ | 111 | /* Additional Descriptor Control definitions */ |
114 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ | 112 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ |
115 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ | 113 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ |
116 | 114 | ||
117 | /* Direct Cache Access (DCA) definitions */ | 115 | /* Direct Cache Access (DCA) definitions */ |
118 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ | 116 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ |
119 | 117 | ||
120 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ | 118 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ |
121 | 119 | ||
122 | #endif /* _E1000_DEFINES_H_ */ | 120 | #endif /* _E1000_DEFINES_H_ */ |
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87e9f61..91a1190990ae 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -36,7 +35,6 @@ | |||
36 | #include "igbvf.h" | 35 | #include "igbvf.h" |
37 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
38 | 37 | ||
39 | |||
40 | struct igbvf_stats { | 38 | struct igbvf_stats { |
41 | char stat_string[ETH_GSTRING_LEN]; | 39 | char stat_string[ETH_GSTRING_LEN]; |
42 | int sizeof_stat; | 40 | int sizeof_stat; |
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { | |||
74 | #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) | 72 | #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) |
75 | 73 | ||
76 | static int igbvf_get_settings(struct net_device *netdev, | 74 | static int igbvf_get_settings(struct net_device *netdev, |
77 | struct ethtool_cmd *ecmd) | 75 | struct ethtool_cmd *ecmd) |
78 | { | 76 | { |
79 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 77 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
80 | struct e1000_hw *hw = &adapter->hw; | 78 | struct e1000_hw *hw = &adapter->hw; |
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, | |||
111 | } | 109 | } |
112 | 110 | ||
113 | static int igbvf_set_settings(struct net_device *netdev, | 111 | static int igbvf_set_settings(struct net_device *netdev, |
114 | struct ethtool_cmd *ecmd) | 112 | struct ethtool_cmd *ecmd) |
115 | { | 113 | { |
116 | return -EOPNOTSUPP; | 114 | return -EOPNOTSUPP; |
117 | } | 115 | } |
118 | 116 | ||
119 | static void igbvf_get_pauseparam(struct net_device *netdev, | 117 | static void igbvf_get_pauseparam(struct net_device *netdev, |
120 | struct ethtool_pauseparam *pause) | 118 | struct ethtool_pauseparam *pause) |
121 | { | 119 | { |
122 | } | 120 | } |
123 | 121 | ||
124 | static int igbvf_set_pauseparam(struct net_device *netdev, | 122 | static int igbvf_set_pauseparam(struct net_device *netdev, |
125 | struct ethtool_pauseparam *pause) | 123 | struct ethtool_pauseparam *pause) |
126 | { | 124 | { |
127 | return -EOPNOTSUPP; | 125 | return -EOPNOTSUPP; |
128 | } | 126 | } |
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, | |||
130 | static u32 igbvf_get_msglevel(struct net_device *netdev) | 128 | static u32 igbvf_get_msglevel(struct net_device *netdev) |
131 | { | 129 | { |
132 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 130 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
131 | |||
133 | return adapter->msg_enable; | 132 | return adapter->msg_enable; |
134 | } | 133 | } |
135 | 134 | ||
136 | static void igbvf_set_msglevel(struct net_device *netdev, u32 data) | 135 | static void igbvf_set_msglevel(struct net_device *netdev, u32 data) |
137 | { | 136 | { |
138 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 137 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
138 | |||
139 | adapter->msg_enable = data; | 139 | adapter->msg_enable = data; |
140 | } | 140 | } |
141 | 141 | ||
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) | |||
146 | } | 146 | } |
147 | 147 | ||
148 | static void igbvf_get_regs(struct net_device *netdev, | 148 | static void igbvf_get_regs(struct net_device *netdev, |
149 | struct ethtool_regs *regs, void *p) | 149 | struct ethtool_regs *regs, void *p) |
150 | { | 150 | { |
151 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 151 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
152 | struct e1000_hw *hw = &adapter->hw; | 152 | struct e1000_hw *hw = &adapter->hw; |
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | static int igbvf_get_eeprom(struct net_device *netdev, | 177 | static int igbvf_get_eeprom(struct net_device *netdev, |
178 | struct ethtool_eeprom *eeprom, u8 *bytes) | 178 | struct ethtool_eeprom *eeprom, u8 *bytes) |
179 | { | 179 | { |
180 | return -EOPNOTSUPP; | 180 | return -EOPNOTSUPP; |
181 | } | 181 | } |
182 | 182 | ||
183 | static int igbvf_set_eeprom(struct net_device *netdev, | 183 | static int igbvf_set_eeprom(struct net_device *netdev, |
184 | struct ethtool_eeprom *eeprom, u8 *bytes) | 184 | struct ethtool_eeprom *eeprom, u8 *bytes) |
185 | { | 185 | { |
186 | return -EOPNOTSUPP; | 186 | return -EOPNOTSUPP; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void igbvf_get_drvinfo(struct net_device *netdev, | 189 | static void igbvf_get_drvinfo(struct net_device *netdev, |
190 | struct ethtool_drvinfo *drvinfo) | 190 | struct ethtool_drvinfo *drvinfo) |
191 | { | 191 | { |
192 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 192 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
193 | 193 | ||
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, | |||
201 | } | 201 | } |
202 | 202 | ||
203 | static void igbvf_get_ringparam(struct net_device *netdev, | 203 | static void igbvf_get_ringparam(struct net_device *netdev, |
204 | struct ethtool_ringparam *ring) | 204 | struct ethtool_ringparam *ring) |
205 | { | 205 | { |
206 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 206 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
207 | struct igbvf_ring *tx_ring = adapter->tx_ring; | 207 | struct igbvf_ring *tx_ring = adapter->tx_ring; |
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, | |||
214 | } | 214 | } |
215 | 215 | ||
216 | static int igbvf_set_ringparam(struct net_device *netdev, | 216 | static int igbvf_set_ringparam(struct net_device *netdev, |
217 | struct ethtool_ringparam *ring) | 217 | struct ethtool_ringparam *ring) |
218 | { | 218 | { |
219 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 219 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
220 | struct igbvf_ring *temp_ring; | 220 | struct igbvf_ring *temp_ring; |
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
255 | 255 | ||
256 | igbvf_down(adapter); | 256 | igbvf_down(adapter); |
257 | 257 | ||
258 | /* | 258 | /* We can't just free everything and then setup again, |
259 | * We can't just free everything and then setup again, | ||
260 | * because the ISRs in MSI-X mode get passed pointers | 259 | * because the ISRs in MSI-X mode get passed pointers |
261 | * to the tx and rx ring structs. | 260 | * to the Tx and Rx ring structs. |
262 | */ | 261 | */ |
263 | if (new_tx_count != adapter->tx_ring->count) { | 262 | if (new_tx_count != adapter->tx_ring->count) { |
264 | memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); | 263 | memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); |
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
283 | 282 | ||
284 | igbvf_free_rx_resources(adapter->rx_ring); | 283 | igbvf_free_rx_resources(adapter->rx_ring); |
285 | 284 | ||
286 | memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); | 285 | memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); |
287 | } | 286 | } |
288 | err_setup: | 287 | err_setup: |
289 | igbvf_up(adapter); | 288 | igbvf_up(adapter); |
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) | |||
307 | } | 306 | } |
308 | 307 | ||
309 | static void igbvf_diag_test(struct net_device *netdev, | 308 | static void igbvf_diag_test(struct net_device *netdev, |
310 | struct ethtool_test *eth_test, u64 *data) | 309 | struct ethtool_test *eth_test, u64 *data) |
311 | { | 310 | { |
312 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 311 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
313 | 312 | ||
314 | set_bit(__IGBVF_TESTING, &adapter->state); | 313 | set_bit(__IGBVF_TESTING, &adapter->state); |
315 | 314 | ||
316 | /* | 315 | /* Link test performed before hardware reset so autoneg doesn't |
317 | * Link test performed before hardware reset so autoneg doesn't | ||
318 | * interfere with test result | 316 | * interfere with test result |
319 | */ | 317 | */ |
320 | if (igbvf_link_test(adapter, &data[0])) | 318 | if (igbvf_link_test(adapter, &data[0])) |
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, | |||
325 | } | 323 | } |
326 | 324 | ||
327 | static void igbvf_get_wol(struct net_device *netdev, | 325 | static void igbvf_get_wol(struct net_device *netdev, |
328 | struct ethtool_wolinfo *wol) | 326 | struct ethtool_wolinfo *wol) |
329 | { | 327 | { |
330 | wol->supported = 0; | 328 | wol->supported = 0; |
331 | wol->wolopts = 0; | 329 | wol->wolopts = 0; |
332 | } | 330 | } |
333 | 331 | ||
334 | static int igbvf_set_wol(struct net_device *netdev, | 332 | static int igbvf_set_wol(struct net_device *netdev, |
335 | struct ethtool_wolinfo *wol) | 333 | struct ethtool_wolinfo *wol) |
336 | { | 334 | { |
337 | return -EOPNOTSUPP; | 335 | return -EOPNOTSUPP; |
338 | } | 336 | } |
339 | 337 | ||
340 | static int igbvf_get_coalesce(struct net_device *netdev, | 338 | static int igbvf_get_coalesce(struct net_device *netdev, |
341 | struct ethtool_coalesce *ec) | 339 | struct ethtool_coalesce *ec) |
342 | { | 340 | { |
343 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 341 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
344 | 342 | ||
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, | |||
351 | } | 349 | } |
352 | 350 | ||
353 | static int igbvf_set_coalesce(struct net_device *netdev, | 351 | static int igbvf_set_coalesce(struct net_device *netdev, |
354 | struct ethtool_coalesce *ec) | 352 | struct ethtool_coalesce *ec) |
355 | { | 353 | { |
356 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 354 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
357 | struct e1000_hw *hw = &adapter->hw; | 355 | struct e1000_hw *hw = &adapter->hw; |
358 | 356 | ||
359 | if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && | 357 | if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && |
360 | (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { | 358 | (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { |
361 | adapter->current_itr = ec->rx_coalesce_usecs << 2; | 359 | adapter->current_itr = ec->rx_coalesce_usecs << 2; |
362 | adapter->requested_itr = 1000000000 / | 360 | adapter->requested_itr = 1000000000 / |
363 | (adapter->current_itr * 256); | 361 | (adapter->current_itr * 256); |
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
366 | adapter->current_itr = IGBVF_START_ITR; | 364 | adapter->current_itr = IGBVF_START_ITR; |
367 | adapter->requested_itr = ec->rx_coalesce_usecs; | 365 | adapter->requested_itr = ec->rx_coalesce_usecs; |
368 | } else if (ec->rx_coalesce_usecs == 0) { | 366 | } else if (ec->rx_coalesce_usecs == 0) { |
369 | /* | 367 | /* The user's desire is to turn off interrupt throttling |
370 | * The user's desire is to turn off interrupt throttling | ||
371 | * altogether, but due to HW limitations, we can't do that. | 368 | * altogether, but due to HW limitations, we can't do that. |
372 | * Instead we set a very small value in EITR, which would | 369 | * Instead we set a very small value in EITR, which would |
373 | * allow ~967k interrupts per second, but allow the adapter's | 370 | * allow ~967k interrupts per second, but allow the adapter's |
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
376 | adapter->current_itr = 4; | 373 | adapter->current_itr = 4; |
377 | adapter->requested_itr = 1000000000 / | 374 | adapter->requested_itr = 1000000000 / |
378 | (adapter->current_itr * 256); | 375 | (adapter->current_itr * 256); |
379 | } else | 376 | } else { |
380 | return -EINVAL; | 377 | return -EINVAL; |
378 | } | ||
381 | 379 | ||
382 | writel(adapter->current_itr, | 380 | writel(adapter->current_itr, |
383 | hw->hw_addr + adapter->rx_ring->itr_register); | 381 | hw->hw_addr + adapter->rx_ring->itr_register); |
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
388 | static int igbvf_nway_reset(struct net_device *netdev) | 386 | static int igbvf_nway_reset(struct net_device *netdev) |
389 | { | 387 | { |
390 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 388 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
389 | |||
391 | if (netif_running(netdev)) | 390 | if (netif_running(netdev)) |
392 | igbvf_reinit_locked(adapter); | 391 | igbvf_reinit_locked(adapter); |
393 | return 0; | 392 | return 0; |
394 | } | 393 | } |
395 | 394 | ||
396 | |||
397 | static void igbvf_get_ethtool_stats(struct net_device *netdev, | 395 | static void igbvf_get_ethtool_stats(struct net_device *netdev, |
398 | struct ethtool_stats *stats, | 396 | struct ethtool_stats *stats, |
399 | u64 *data) | 397 | u64 *data) |
400 | { | 398 | { |
401 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 399 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
402 | int i; | 400 | int i; |
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, | |||
404 | igbvf_update_stats(adapter); | 402 | igbvf_update_stats(adapter); |
405 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { | 403 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { |
406 | char *p = (char *)adapter + | 404 | char *p = (char *)adapter + |
407 | igbvf_gstrings_stats[i].stat_offset; | 405 | igbvf_gstrings_stats[i].stat_offset; |
408 | char *b = (char *)adapter + | 406 | char *b = (char *)adapter + |
409 | igbvf_gstrings_stats[i].base_stat_offset; | 407 | igbvf_gstrings_stats[i].base_stat_offset; |
410 | data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == | 408 | data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == |
411 | sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : | 409 | sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : |
412 | (*(u32 *)p - *(u32 *)b)); | 410 | (*(u32 *)p - *(u32 *)b)); |
413 | } | 411 | } |
414 | |||
415 | } | 412 | } |
416 | 413 | ||
417 | static int igbvf_get_sset_count(struct net_device *dev, int stringset) | 414 | static int igbvf_get_sset_count(struct net_device *dev, int stringset) |
418 | { | 415 | { |
419 | switch(stringset) { | 416 | switch (stringset) { |
420 | case ETH_SS_TEST: | 417 | case ETH_SS_TEST: |
421 | return IGBVF_TEST_LEN; | 418 | return IGBVF_TEST_LEN; |
422 | case ETH_SS_STATS: | 419 | case ETH_SS_STATS: |
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) | |||
427 | } | 424 | } |
428 | 425 | ||
429 | static void igbvf_get_strings(struct net_device *netdev, u32 stringset, | 426 | static void igbvf_get_strings(struct net_device *netdev, u32 stringset, |
430 | u8 *data) | 427 | u8 *data) |
431 | { | 428 | { |
432 | u8 *p = data; | 429 | u8 *p = data; |
433 | int i; | 430 | int i; |
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c8f889..f166baab8d7e 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -43,10 +42,10 @@ struct igbvf_info; | |||
43 | struct igbvf_adapter; | 42 | struct igbvf_adapter; |
44 | 43 | ||
45 | /* Interrupt defines */ | 44 | /* Interrupt defines */ |
46 | #define IGBVF_START_ITR 488 /* ~8000 ints/sec */ | 45 | #define IGBVF_START_ITR 488 /* ~8000 ints/sec */ |
47 | #define IGBVF_4K_ITR 980 | 46 | #define IGBVF_4K_ITR 980 |
48 | #define IGBVF_20K_ITR 196 | 47 | #define IGBVF_20K_ITR 196 |
49 | #define IGBVF_70K_ITR 56 | 48 | #define IGBVF_70K_ITR 56 |
50 | 49 | ||
51 | enum latency_range { | 50 | enum latency_range { |
52 | lowest_latency = 0, | 51 | lowest_latency = 0, |
@@ -55,56 +54,55 @@ enum latency_range { | |||
55 | latency_invalid = 255 | 54 | latency_invalid = 255 |
56 | }; | 55 | }; |
57 | 56 | ||
58 | |||
59 | /* Interrupt modes, as used by the IntMode parameter */ | 57 | /* Interrupt modes, as used by the IntMode parameter */ |
60 | #define IGBVF_INT_MODE_LEGACY 0 | 58 | #define IGBVF_INT_MODE_LEGACY 0 |
61 | #define IGBVF_INT_MODE_MSI 1 | 59 | #define IGBVF_INT_MODE_MSI 1 |
62 | #define IGBVF_INT_MODE_MSIX 2 | 60 | #define IGBVF_INT_MODE_MSIX 2 |
63 | 61 | ||
64 | /* Tx/Rx descriptor defines */ | 62 | /* Tx/Rx descriptor defines */ |
65 | #define IGBVF_DEFAULT_TXD 256 | 63 | #define IGBVF_DEFAULT_TXD 256 |
66 | #define IGBVF_MAX_TXD 4096 | 64 | #define IGBVF_MAX_TXD 4096 |
67 | #define IGBVF_MIN_TXD 80 | 65 | #define IGBVF_MIN_TXD 80 |
68 | 66 | ||
69 | #define IGBVF_DEFAULT_RXD 256 | 67 | #define IGBVF_DEFAULT_RXD 256 |
70 | #define IGBVF_MAX_RXD 4096 | 68 | #define IGBVF_MAX_RXD 4096 |
71 | #define IGBVF_MIN_RXD 80 | 69 | #define IGBVF_MIN_RXD 80 |
72 | 70 | ||
73 | #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ | 71 | #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ |
74 | #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ | 72 | #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ |
75 | 73 | ||
76 | /* RX descriptor control thresholds. | 74 | /* RX descriptor control thresholds. |
77 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of | 75 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of |
78 | * descriptors available in its onboard memory. | 76 | * descriptors available in its onboard memory. |
79 | * Setting this to 0 disables RX descriptor prefetch. | 77 | * Setting this to 0 disables RX descriptor prefetch. |
80 | * HTHRESH - MAC will only prefetch if there are at least this many descriptors | 78 | * HTHRESH - MAC will only prefetch if there are at least this many descriptors |
81 | * available in host memory. | 79 | * available in host memory. |
82 | * If PTHRESH is 0, this should also be 0. | 80 | * If PTHRESH is 0, this should also be 0. |
83 | * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back | 81 | * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back |
84 | * descriptors until either it has this many to write back, or the | 82 | * descriptors until either it has this many to write back, or the |
85 | * ITR timer expires. | 83 | * ITR timer expires. |
86 | */ | 84 | */ |
87 | #define IGBVF_RX_PTHRESH 16 | 85 | #define IGBVF_RX_PTHRESH 16 |
88 | #define IGBVF_RX_HTHRESH 8 | 86 | #define IGBVF_RX_HTHRESH 8 |
89 | #define IGBVF_RX_WTHRESH 1 | 87 | #define IGBVF_RX_WTHRESH 1 |
90 | 88 | ||
91 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | 89 | /* this is the size past which hardware will drop packets when setting LPE=0 */ |
92 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | 90 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 |
93 | 91 | ||
94 | #define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ | 92 | #define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ |
95 | 93 | ||
96 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ | 94 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ |
97 | #define IGBVF_TX_QUEUE_WAKE 32 | 95 | #define IGBVF_TX_QUEUE_WAKE 32 |
98 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 96 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
99 | #define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 97 | #define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
100 | 98 | ||
101 | #define AUTO_ALL_MODES 0 | 99 | #define AUTO_ALL_MODES 0 |
102 | #define IGBVF_EEPROM_APME 0x0400 | 100 | #define IGBVF_EEPROM_APME 0x0400 |
103 | 101 | ||
104 | #define IGBVF_MNG_VLAN_NONE (-1) | 102 | #define IGBVF_MNG_VLAN_NONE (-1) |
105 | 103 | ||
106 | /* Number of packet split data buffers (not including the header buffer) */ | 104 | /* Number of packet split data buffers (not including the header buffer) */ |
107 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | 105 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) |
108 | 106 | ||
109 | enum igbvf_boards { | 107 | enum igbvf_boards { |
110 | board_vf, | 108 | board_vf, |
@@ -116,8 +114,7 @@ struct igbvf_queue_stats { | |||
116 | u64 bytes; | 114 | u64 bytes; |
117 | }; | 115 | }; |
118 | 116 | ||
119 | /* | 117 | /* wrappers around a pointer to a socket buffer, |
120 | * wrappers around a pointer to a socket buffer, | ||
121 | * so a DMA handle can be stored along with the buffer | 118 | * so a DMA handle can be stored along with the buffer |
122 | */ | 119 | */ |
123 | struct igbvf_buffer { | 120 | struct igbvf_buffer { |
@@ -148,10 +145,10 @@ union igbvf_desc { | |||
148 | 145 | ||
149 | struct igbvf_ring { | 146 | struct igbvf_ring { |
150 | struct igbvf_adapter *adapter; /* backlink */ | 147 | struct igbvf_adapter *adapter; /* backlink */ |
151 | union igbvf_desc *desc; /* pointer to ring memory */ | 148 | union igbvf_desc *desc; /* pointer to ring memory */ |
152 | dma_addr_t dma; /* phys address of ring */ | 149 | dma_addr_t dma; /* phys address of ring */ |
153 | unsigned int size; /* length of ring in bytes */ | 150 | unsigned int size; /* length of ring in bytes */ |
154 | unsigned int count; /* number of desc. in ring */ | 151 | unsigned int count; /* number of desc. in ring */ |
155 | 152 | ||
156 | u16 next_to_use; | 153 | u16 next_to_use; |
157 | u16 next_to_clean; | 154 | u16 next_to_clean; |
@@ -202,9 +199,7 @@ struct igbvf_adapter { | |||
202 | u32 requested_itr; /* ints/sec or adaptive */ | 199 | u32 requested_itr; /* ints/sec or adaptive */ |
203 | u32 current_itr; /* Actual ITR register value, not ints/sec */ | 200 | u32 current_itr; /* Actual ITR register value, not ints/sec */ |
204 | 201 | ||
205 | /* | 202 | /* Tx */ |
206 | * Tx | ||
207 | */ | ||
208 | struct igbvf_ring *tx_ring /* One per active queue */ | 203 | struct igbvf_ring *tx_ring /* One per active queue */ |
209 | ____cacheline_aligned_in_smp; | 204 | ____cacheline_aligned_in_smp; |
210 | 205 | ||
@@ -226,9 +221,7 @@ struct igbvf_adapter { | |||
226 | u32 tx_fifo_size; | 221 | u32 tx_fifo_size; |
227 | u32 tx_dma_failed; | 222 | u32 tx_dma_failed; |
228 | 223 | ||
229 | /* | 224 | /* Rx */ |
230 | * Rx | ||
231 | */ | ||
232 | struct igbvf_ring *rx_ring; | 225 | struct igbvf_ring *rx_ring; |
233 | 226 | ||
234 | u32 rx_int_delay; | 227 | u32 rx_int_delay; |
@@ -249,7 +242,7 @@ struct igbvf_adapter { | |||
249 | struct net_device *netdev; | 242 | struct net_device *netdev; |
250 | struct pci_dev *pdev; | 243 | struct pci_dev *pdev; |
251 | struct net_device_stats net_stats; | 244 | struct net_device_stats net_stats; |
252 | spinlock_t stats_lock; /* prevent concurrent stats updates */ | 245 | spinlock_t stats_lock; /* prevent concurrent stats updates */ |
253 | 246 | ||
254 | /* structs defined in e1000_hw.h */ | 247 | /* structs defined in e1000_hw.h */ |
255 | struct e1000_hw hw; | 248 | struct e1000_hw hw; |
@@ -286,16 +279,16 @@ struct igbvf_adapter { | |||
286 | }; | 279 | }; |
287 | 280 | ||
288 | struct igbvf_info { | 281 | struct igbvf_info { |
289 | enum e1000_mac_type mac; | 282 | enum e1000_mac_type mac; |
290 | unsigned int flags; | 283 | unsigned int flags; |
291 | u32 pba; | 284 | u32 pba; |
292 | void (*init_ops)(struct e1000_hw *); | 285 | void (*init_ops)(struct e1000_hw *); |
293 | s32 (*get_variants)(struct igbvf_adapter *); | 286 | s32 (*get_variants)(struct igbvf_adapter *); |
294 | }; | 287 | }; |
295 | 288 | ||
296 | /* hardware capability, feature, and workaround flags */ | 289 | /* hardware capability, feature, and workaround flags */ |
297 | #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) | 290 | #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) |
298 | #define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) | 291 | #define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) |
299 | #define IGBVF_RX_DESC_ADV(R, i) \ | 292 | #define IGBVF_RX_DESC_ADV(R, i) \ |
300 | (&((((R).desc))[i].rx_desc)) | 293 | (&((((R).desc))[i].rx_desc)) |
301 | #define IGBVF_TX_DESC_ADV(R, i) \ | 294 | #define IGBVF_TX_DESC_ADV(R, i) \ |
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc9fc5d..7b6cb4c3764c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -54,10 +53,10 @@ out: | |||
54 | } | 53 | } |
55 | 54 | ||
56 | /** | 55 | /** |
57 | * e1000_poll_for_ack - Wait for message acknowledgement | 56 | * e1000_poll_for_ack - Wait for message acknowledgment |
58 | * @hw: pointer to the HW structure | 57 | * @hw: pointer to the HW structure |
59 | * | 58 | * |
60 | * returns SUCCESS if it successfully received a message acknowledgement | 59 | * returns SUCCESS if it successfully received a message acknowledgment |
61 | **/ | 60 | **/ |
62 | static s32 e1000_poll_for_ack(struct e1000_hw *hw) | 61 | static s32 e1000_poll_for_ack(struct e1000_hw *hw) |
63 | { | 62 | { |
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) | |||
218 | s32 ret_val = -E1000_ERR_MBX; | 217 | s32 ret_val = -E1000_ERR_MBX; |
219 | 218 | ||
220 | if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | | 219 | if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | |
221 | E1000_V2PMAILBOX_RSTI))) { | 220 | E1000_V2PMAILBOX_RSTI))) { |
222 | ret_val = E1000_SUCCESS; | 221 | ret_val = E1000_SUCCESS; |
223 | hw->mbx.stats.rsts++; | 222 | hw->mbx.stats.rsts++; |
224 | } | 223 | } |
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) | |||
239 | /* Take ownership of the buffer */ | 238 | /* Take ownership of the buffer */ |
240 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); | 239 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); |
241 | 240 | ||
242 | /* reserve mailbox for vf use */ | 241 | /* reserve mailbox for VF use */ |
243 | if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) | 242 | if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) |
244 | ret_val = E1000_SUCCESS; | 243 | ret_val = E1000_SUCCESS; |
245 | 244 | ||
@@ -283,7 +282,7 @@ out_no_write: | |||
283 | } | 282 | } |
284 | 283 | ||
285 | /** | 284 | /** |
286 | * e1000_read_mbx_vf - Reads a message from the inbox intended for vf | 285 | * e1000_read_mbx_vf - Reads a message from the inbox intended for VF |
287 | * @hw: pointer to the HW structure | 286 | * @hw: pointer to the HW structure |
288 | * @msg: The message buffer | 287 | * @msg: The message buffer |
289 | * @size: Length of buffer | 288 | * @size: Length of buffer |
@@ -315,17 +314,18 @@ out_no_read: | |||
315 | } | 314 | } |
316 | 315 | ||
317 | /** | 316 | /** |
318 | * e1000_init_mbx_params_vf - set initial values for vf mailbox | 317 | * e1000_init_mbx_params_vf - set initial values for VF mailbox |
319 | * @hw: pointer to the HW structure | 318 | * @hw: pointer to the HW structure |
320 | * | 319 | * |
321 | * Initializes the hw->mbx struct to correct values for vf mailbox | 320 | * Initializes the hw->mbx struct to correct values for VF mailbox |
322 | */ | 321 | */ |
323 | s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) | 322 | s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) |
324 | { | 323 | { |
325 | struct e1000_mbx_info *mbx = &hw->mbx; | 324 | struct e1000_mbx_info *mbx = &hw->mbx; |
326 | 325 | ||
327 | /* start mailbox as timed out and let the reset_hw call set the timeout | 326 | /* start mailbox as timed out and let the reset_hw call set the timeout |
328 | * value to begin communications */ | 327 | * value to begin communications |
328 | */ | ||
329 | mbx->timeout = 0; | 329 | mbx->timeout = 0; |
330 | mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; | 330 | mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; |
331 | 331 | ||
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) | |||
347 | 347 | ||
348 | return E1000_SUCCESS; | 348 | return E1000_SUCCESS; |
349 | } | 349 | } |
350 | |||
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bcb0e22..f800bf8eedae 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -30,44 +29,44 @@ | |||
30 | 29 | ||
31 | #include "vf.h" | 30 | #include "vf.h" |
32 | 31 | ||
33 | #define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ | 32 | #define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ |
34 | #define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ | 33 | #define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ |
35 | #define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 34 | #define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
36 | #define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | 35 | #define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ |
37 | #define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ | 36 | #define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ |
38 | #define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ | 37 | #define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ |
39 | #define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ | 38 | #define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ |
40 | #define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ | 39 | #define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ |
41 | #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ | 40 | #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ |
42 | 41 | ||
43 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | 42 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ |
44 | 43 | ||
45 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the | 44 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the |
46 | * PF. The reverse is true if it is E1000_PF_*. | 45 | * PF. The reverse is true if it is E1000_PF_*. |
47 | * Message ACK's are the value or'd with 0xF0000000 | 46 | * Message ACK's are the value or'd with 0xF0000000 |
48 | */ | 47 | */ |
49 | #define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | 48 | /* Messages below or'd with this are the ACK */ |
50 | * this are the ACK */ | 49 | #define E1000_VT_MSGTYPE_ACK 0x80000000 |
51 | #define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | 50 | /* Messages below or'd with this are the NACK */ |
52 | * this are the NACK */ | 51 | #define E1000_VT_MSGTYPE_NACK 0x40000000 |
53 | #define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | 52 | /* Indicates that VF is still clear to send requests */ |
54 | clear to send requests */ | 53 | #define E1000_VT_MSGTYPE_CTS 0x20000000 |
55 | 54 | ||
56 | /* We have a total wait time of 1s for vf mailbox posted messages */ | 55 | /* We have a total wait time of 1s for vf mailbox posted messages */ |
57 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ | 56 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ |
58 | #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ | 57 | #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ |
59 | 58 | ||
60 | #define E1000_VT_MSGINFO_SHIFT 16 | 59 | #define E1000_VT_MSGINFO_SHIFT 16 |
61 | /* bits 23:16 are used for extra info for certain messages */ | 60 | /* bits 23:16 are used for extra info for certain messages */ |
62 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) | 61 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) |
63 | 62 | ||
64 | #define E1000_VF_RESET 0x01 /* VF requests reset */ | 63 | #define E1000_VF_RESET 0x01 /* VF requests reset */ |
65 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | 64 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ |
66 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | 65 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ |
67 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | 66 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ |
68 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | 67 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ |
69 | 68 | ||
70 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ | 69 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ |
71 | 70 | ||
72 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw); | 71 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw); |
73 | s32 e1000_init_mbx_params_vf(struct e1000_hw *); | 72 | s32 e1000_init_mbx_params_vf(struct e1000_hw *); |
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a42fdd..55f1404f133e 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); | |||
66 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); | 65 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); |
67 | 66 | ||
68 | static struct igbvf_info igbvf_vf_info = { | 67 | static struct igbvf_info igbvf_vf_info = { |
69 | .mac = e1000_vfadapt, | 68 | .mac = e1000_vfadapt, |
70 | .flags = 0, | 69 | .flags = 0, |
71 | .pba = 10, | 70 | .pba = 10, |
72 | .init_ops = e1000_init_function_pointers_vf, | 71 | .init_ops = e1000_init_function_pointers_vf, |
73 | }; | 72 | }; |
74 | 73 | ||
75 | static struct igbvf_info igbvf_i350_vf_info = { | 74 | static struct igbvf_info igbvf_i350_vf_info = { |
76 | .mac = e1000_vfadapt_i350, | 75 | .mac = e1000_vfadapt_i350, |
77 | .flags = 0, | 76 | .flags = 0, |
78 | .pba = 10, | 77 | .pba = 10, |
79 | .init_ops = e1000_init_function_pointers_vf, | 78 | .init_ops = e1000_init_function_pointers_vf, |
80 | }; | 79 | }; |
81 | 80 | ||
82 | static const struct igbvf_info *igbvf_info_tbl[] = { | 81 | static const struct igbvf_info *igbvf_info_tbl[] = { |
83 | [board_vf] = &igbvf_vf_info, | 82 | [board_vf] = &igbvf_vf_info, |
84 | [board_i350_vf] = &igbvf_i350_vf_info, | 83 | [board_i350_vf] = &igbvf_i350_vf_info, |
85 | }; | 84 | }; |
86 | 85 | ||
87 | /** | 86 | /** |
88 | * igbvf_desc_unused - calculate if we have unused descriptors | 87 | * igbvf_desc_unused - calculate if we have unused descriptors |
88 | * @rx_ring: address of receive ring structure | ||
89 | **/ | 89 | **/ |
90 | static int igbvf_desc_unused(struct igbvf_ring *ring) | 90 | static int igbvf_desc_unused(struct igbvf_ring *ring) |
91 | { | 91 | { |
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) | |||
103 | * @skb: pointer to sk_buff to be indicated to stack | 103 | * @skb: pointer to sk_buff to be indicated to stack |
104 | **/ | 104 | **/ |
105 | static void igbvf_receive_skb(struct igbvf_adapter *adapter, | 105 | static void igbvf_receive_skb(struct igbvf_adapter *adapter, |
106 | struct net_device *netdev, | 106 | struct net_device *netdev, |
107 | struct sk_buff *skb, | 107 | struct sk_buff *skb, |
108 | u32 status, u16 vlan) | 108 | u32 status, u16 vlan) |
109 | { | 109 | { |
110 | u16 vid; | 110 | u16 vid; |
111 | 111 | ||
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | 125 | static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, |
126 | u32 status_err, struct sk_buff *skb) | 126 | u32 status_err, struct sk_buff *skb) |
127 | { | 127 | { |
128 | skb_checksum_none_assert(skb); | 128 | skb_checksum_none_assert(skb); |
129 | 129 | ||
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | |||
153 | * @cleaned_count: number of buffers to repopulate | 153 | * @cleaned_count: number of buffers to repopulate |
154 | **/ | 154 | **/ |
155 | static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | 155 | static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, |
156 | int cleaned_count) | 156 | int cleaned_count) |
157 | { | 157 | { |
158 | struct igbvf_adapter *adapter = rx_ring->adapter; | 158 | struct igbvf_adapter *adapter = rx_ring->adapter; |
159 | struct net_device *netdev = adapter->netdev; | 159 | struct net_device *netdev = adapter->netdev; |
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
188 | } | 188 | } |
189 | buffer_info->page_dma = | 189 | buffer_info->page_dma = |
190 | dma_map_page(&pdev->dev, buffer_info->page, | 190 | dma_map_page(&pdev->dev, buffer_info->page, |
191 | buffer_info->page_offset, | 191 | buffer_info->page_offset, |
192 | PAGE_SIZE / 2, | 192 | PAGE_SIZE / 2, |
193 | DMA_FROM_DEVICE); | 193 | DMA_FROM_DEVICE); |
194 | if (dma_mapping_error(&pdev->dev, | 194 | if (dma_mapping_error(&pdev->dev, |
195 | buffer_info->page_dma)) { | 195 | buffer_info->page_dma)) { |
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
209 | 209 | ||
210 | buffer_info->skb = skb; | 210 | buffer_info->skb = skb; |
211 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, | 211 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, |
212 | bufsz, | 212 | bufsz, |
213 | DMA_FROM_DEVICE); | 213 | DMA_FROM_DEVICE); |
214 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 214 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
215 | dev_kfree_skb(buffer_info->skb); | 215 | dev_kfree_skb(buffer_info->skb); |
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
219 | } | 219 | } |
220 | } | 220 | } |
221 | /* Refresh the desc even if buffer_addrs didn't change because | 221 | /* Refresh the desc even if buffer_addrs didn't change because |
222 | * each write-back erases this info. */ | 222 | * each write-back erases this info. |
223 | */ | ||
223 | if (adapter->rx_ps_hdr_size) { | 224 | if (adapter->rx_ps_hdr_size) { |
224 | rx_desc->read.pkt_addr = | 225 | rx_desc->read.pkt_addr = |
225 | cpu_to_le64(buffer_info->page_dma); | 226 | cpu_to_le64(buffer_info->page_dma); |
226 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); | 227 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); |
227 | } else { | 228 | } else { |
228 | rx_desc->read.pkt_addr = | 229 | rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); |
229 | cpu_to_le64(buffer_info->dma); | ||
230 | rx_desc->read.hdr_addr = 0; | 230 | rx_desc->read.hdr_addr = 0; |
231 | } | 231 | } |
232 | 232 | ||
@@ -247,7 +247,8 @@ no_buffers: | |||
247 | /* Force memory writes to complete before letting h/w | 247 | /* Force memory writes to complete before letting h/w |
248 | * know there are new descriptors to fetch. (Only | 248 | * know there are new descriptors to fetch. (Only |
249 | * applicable for weak-ordered memory model archs, | 249 | * applicable for weak-ordered memory model archs, |
250 | * such as IA-64). */ | 250 | * such as IA-64). |
251 | */ | ||
251 | wmb(); | 252 | wmb(); |
252 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 253 | writel(i, adapter->hw.hw_addr + rx_ring->tail); |
253 | } | 254 | } |
@@ -261,7 +262,7 @@ no_buffers: | |||
261 | * is no guarantee that everything was cleaned | 262 | * is no guarantee that everything was cleaned |
262 | **/ | 263 | **/ |
263 | static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | 264 | static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, |
264 | int *work_done, int work_to_do) | 265 | int *work_done, int work_to_do) |
265 | { | 266 | { |
266 | struct igbvf_ring *rx_ring = adapter->rx_ring; | 267 | struct igbvf_ring *rx_ring = adapter->rx_ring; |
267 | struct net_device *netdev = adapter->netdev; | 268 | struct net_device *netdev = adapter->netdev; |
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
292 | * that case, it fills the header buffer and spills the rest | 293 | * that case, it fills the header buffer and spills the rest |
293 | * into the page. | 294 | * into the page. |
294 | */ | 295 | */ |
295 | hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & | 296 | hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) |
296 | E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; | 297 | & E1000_RXDADV_HDRBUFLEN_MASK) >> |
298 | E1000_RXDADV_HDRBUFLEN_SHIFT; | ||
297 | if (hlen > adapter->rx_ps_hdr_size) | 299 | if (hlen > adapter->rx_ps_hdr_size) |
298 | hlen = adapter->rx_ps_hdr_size; | 300 | hlen = adapter->rx_ps_hdr_size; |
299 | 301 | ||
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
306 | buffer_info->skb = NULL; | 308 | buffer_info->skb = NULL; |
307 | if (!adapter->rx_ps_hdr_size) { | 309 | if (!adapter->rx_ps_hdr_size) { |
308 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 310 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
309 | adapter->rx_buffer_len, | 311 | adapter->rx_buffer_len, |
310 | DMA_FROM_DEVICE); | 312 | DMA_FROM_DEVICE); |
311 | buffer_info->dma = 0; | 313 | buffer_info->dma = 0; |
312 | skb_put(skb, length); | 314 | skb_put(skb, length); |
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
315 | 317 | ||
316 | if (!skb_shinfo(skb)->nr_frags) { | 318 | if (!skb_shinfo(skb)->nr_frags) { |
317 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 319 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
318 | adapter->rx_ps_hdr_size, | 320 | adapter->rx_ps_hdr_size, |
319 | DMA_FROM_DEVICE); | 321 | DMA_FROM_DEVICE); |
320 | skb_put(skb, hlen); | 322 | skb_put(skb, hlen); |
321 | } | 323 | } |
322 | 324 | ||
323 | if (length) { | 325 | if (length) { |
324 | dma_unmap_page(&pdev->dev, buffer_info->page_dma, | 326 | dma_unmap_page(&pdev->dev, buffer_info->page_dma, |
325 | PAGE_SIZE / 2, | 327 | PAGE_SIZE / 2, |
326 | DMA_FROM_DEVICE); | 328 | DMA_FROM_DEVICE); |
327 | buffer_info->page_dma = 0; | 329 | buffer_info->page_dma = 0; |
328 | 330 | ||
329 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 331 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
330 | buffer_info->page, | 332 | buffer_info->page, |
331 | buffer_info->page_offset, | 333 | buffer_info->page_offset, |
332 | length); | 334 | length); |
333 | 335 | ||
334 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || | 336 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || |
335 | (page_count(buffer_info->page) != 1)) | 337 | (page_count(buffer_info->page) != 1)) |
@@ -370,7 +372,7 @@ send_up: | |||
370 | skb->protocol = eth_type_trans(skb, netdev); | 372 | skb->protocol = eth_type_trans(skb, netdev); |
371 | 373 | ||
372 | igbvf_receive_skb(adapter, netdev, skb, staterr, | 374 | igbvf_receive_skb(adapter, netdev, skb, staterr, |
373 | rx_desc->wb.upper.vlan); | 375 | rx_desc->wb.upper.vlan); |
374 | 376 | ||
375 | next_desc: | 377 | next_desc: |
376 | rx_desc->wb.upper.status_error = 0; | 378 | rx_desc->wb.upper.status_error = 0; |
@@ -402,7 +404,7 @@ next_desc: | |||
402 | } | 404 | } |
403 | 405 | ||
404 | static void igbvf_put_txbuf(struct igbvf_adapter *adapter, | 406 | static void igbvf_put_txbuf(struct igbvf_adapter *adapter, |
405 | struct igbvf_buffer *buffer_info) | 407 | struct igbvf_buffer *buffer_info) |
406 | { | 408 | { |
407 | if (buffer_info->dma) { | 409 | if (buffer_info->dma) { |
408 | if (buffer_info->mapped_as_page) | 410 | if (buffer_info->mapped_as_page) |
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, | |||
431 | * Return 0 on success, negative on failure | 433 | * Return 0 on success, negative on failure |
432 | **/ | 434 | **/ |
433 | int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, | 435 | int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, |
434 | struct igbvf_ring *tx_ring) | 436 | struct igbvf_ring *tx_ring) |
435 | { | 437 | { |
436 | struct pci_dev *pdev = adapter->pdev; | 438 | struct pci_dev *pdev = adapter->pdev; |
437 | int size; | 439 | int size; |
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, | |||
458 | err: | 460 | err: |
459 | vfree(tx_ring->buffer_info); | 461 | vfree(tx_ring->buffer_info); |
460 | dev_err(&adapter->pdev->dev, | 462 | dev_err(&adapter->pdev->dev, |
461 | "Unable to allocate memory for the transmit descriptor ring\n"); | 463 | "Unable to allocate memory for the transmit descriptor ring\n"); |
462 | return -ENOMEM; | 464 | return -ENOMEM; |
463 | } | 465 | } |
464 | 466 | ||
@@ -501,7 +503,7 @@ err: | |||
501 | vfree(rx_ring->buffer_info); | 503 | vfree(rx_ring->buffer_info); |
502 | rx_ring->buffer_info = NULL; | 504 | rx_ring->buffer_info = NULL; |
503 | dev_err(&adapter->pdev->dev, | 505 | dev_err(&adapter->pdev->dev, |
504 | "Unable to allocate memory for the receive descriptor ring\n"); | 506 | "Unable to allocate memory for the receive descriptor ring\n"); |
505 | return -ENOMEM; | 507 | return -ENOMEM; |
506 | } | 508 | } |
507 | 509 | ||
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) | |||
578 | for (i = 0; i < rx_ring->count; i++) { | 580 | for (i = 0; i < rx_ring->count; i++) { |
579 | buffer_info = &rx_ring->buffer_info[i]; | 581 | buffer_info = &rx_ring->buffer_info[i]; |
580 | if (buffer_info->dma) { | 582 | if (buffer_info->dma) { |
581 | if (adapter->rx_ps_hdr_size){ | 583 | if (adapter->rx_ps_hdr_size) { |
582 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 584 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
583 | adapter->rx_ps_hdr_size, | 585 | adapter->rx_ps_hdr_size, |
584 | DMA_FROM_DEVICE); | 586 | DMA_FROM_DEVICE); |
585 | } else { | 587 | } else { |
586 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 588 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
587 | adapter->rx_buffer_len, | 589 | adapter->rx_buffer_len, |
588 | DMA_FROM_DEVICE); | 590 | DMA_FROM_DEVICE); |
589 | } | 591 | } |
590 | buffer_info->dma = 0; | 592 | buffer_info->dma = 0; |
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) | |||
599 | if (buffer_info->page_dma) | 601 | if (buffer_info->page_dma) |
600 | dma_unmap_page(&pdev->dev, | 602 | dma_unmap_page(&pdev->dev, |
601 | buffer_info->page_dma, | 603 | buffer_info->page_dma, |
602 | PAGE_SIZE / 2, | 604 | PAGE_SIZE / 2, |
603 | DMA_FROM_DEVICE); | 605 | DMA_FROM_DEVICE); |
604 | put_page(buffer_info->page); | 606 | put_page(buffer_info->page); |
605 | buffer_info->page = NULL; | 607 | buffer_info->page = NULL; |
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) | |||
638 | rx_ring->buffer_info = NULL; | 640 | rx_ring->buffer_info = NULL; |
639 | 641 | ||
640 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 642 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
641 | rx_ring->dma); | 643 | rx_ring->dma); |
642 | rx_ring->desc = NULL; | 644 | rx_ring->desc = NULL; |
643 | } | 645 | } |
644 | 646 | ||
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) | |||
649 | * @packets: the number of packets during this measurement interval | 651 | * @packets: the number of packets during this measurement interval |
650 | * @bytes: the number of bytes during this measurement interval | 652 | * @bytes: the number of bytes during this measurement interval |
651 | * | 653 | * |
652 | * Stores a new ITR value based on packets and byte | 654 | * Stores a new ITR value based on packets and byte counts during the last |
653 | * counts during the last interrupt. The advantage of per interrupt | 655 | * interrupt. The advantage of per interrupt computation is faster updates |
654 | * computation is faster updates and more accurate ITR for the current | 656 | * and more accurate ITR for the current traffic pattern. Constants in this |
655 | * traffic pattern. Constants in this function were computed | 657 | * function were computed based on theoretical maximum wire speed and thresholds |
656 | * based on theoretical maximum wire speed and thresholds were set based | 658 | * were set based on testing data as well as attempting to minimize response |
657 | * on testing data as well as attempting to minimize response time | 659 | * time while increasing bulk throughput. |
658 | * while increasing bulk throughput. | ||
659 | **/ | 660 | **/ |
660 | static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, | 661 | static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, |
661 | enum latency_range itr_setting, | 662 | enum latency_range itr_setting, |
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) | |||
744 | 745 | ||
745 | new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); | 746 | new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); |
746 | 747 | ||
747 | |||
748 | if (new_itr != adapter->tx_ring->itr_val) { | 748 | if (new_itr != adapter->tx_ring->itr_val) { |
749 | u32 current_itr = adapter->tx_ring->itr_val; | 749 | u32 current_itr = adapter->tx_ring->itr_val; |
750 | /* | 750 | /* this attempts to bias the interrupt rate towards Bulk |
751 | * this attempts to bias the interrupt rate towards Bulk | ||
752 | * by adding intermediate steps when interrupt rate is | 751 | * by adding intermediate steps when interrupt rate is |
753 | * increasing | 752 | * increasing |
754 | */ | 753 | */ |
755 | new_itr = new_itr > current_itr ? | 754 | new_itr = new_itr > current_itr ? |
756 | min(current_itr + (new_itr >> 2), new_itr) : | 755 | min(current_itr + (new_itr >> 2), new_itr) : |
757 | new_itr; | 756 | new_itr; |
758 | adapter->tx_ring->itr_val = new_itr; | 757 | adapter->tx_ring->itr_val = new_itr; |
759 | 758 | ||
760 | adapter->tx_ring->set_itr = 1; | 759 | adapter->tx_ring->set_itr = 1; |
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) | |||
772 | 771 | ||
773 | if (new_itr != adapter->rx_ring->itr_val) { | 772 | if (new_itr != adapter->rx_ring->itr_val) { |
774 | u32 current_itr = adapter->rx_ring->itr_val; | 773 | u32 current_itr = adapter->rx_ring->itr_val; |
774 | |||
775 | new_itr = new_itr > current_itr ? | 775 | new_itr = new_itr > current_itr ? |
776 | min(current_itr + (new_itr >> 2), new_itr) : | 776 | min(current_itr + (new_itr >> 2), new_itr) : |
777 | new_itr; | 777 | new_itr; |
778 | adapter->rx_ring->itr_val = new_itr; | 778 | adapter->rx_ring->itr_val = new_itr; |
779 | 779 | ||
780 | adapter->rx_ring->set_itr = 1; | 780 | adapter->rx_ring->set_itr = 1; |
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) | |||
829 | segs = skb_shinfo(skb)->gso_segs ?: 1; | 829 | segs = skb_shinfo(skb)->gso_segs ?: 1; |
830 | /* multiply data chunks by size of headers */ | 830 | /* multiply data chunks by size of headers */ |
831 | bytecount = ((segs - 1) * skb_headlen(skb)) + | 831 | bytecount = ((segs - 1) * skb_headlen(skb)) + |
832 | skb->len; | 832 | skb->len; |
833 | total_packets += segs; | 833 | total_packets += segs; |
834 | total_bytes += bytecount; | 834 | total_bytes += bytecount; |
835 | } | 835 | } |
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) | |||
849 | 849 | ||
850 | tx_ring->next_to_clean = i; | 850 | tx_ring->next_to_clean = i; |
851 | 851 | ||
852 | if (unlikely(count && | 852 | if (unlikely(count && netif_carrier_ok(netdev) && |
853 | netif_carrier_ok(netdev) && | 853 | igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { |
854 | igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { | ||
855 | /* Make sure that anybody stopping the queue after this | 854 | /* Make sure that anybody stopping the queue after this |
856 | * sees the new next_to_clean. | 855 | * sees the new next_to_clean. |
857 | */ | 856 | */ |
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) | |||
902 | adapter->total_tx_bytes = 0; | 901 | adapter->total_tx_bytes = 0; |
903 | adapter->total_tx_packets = 0; | 902 | adapter->total_tx_packets = 0; |
904 | 903 | ||
905 | /* auto mask will automatically reenable the interrupt when we write | 904 | /* auto mask will automatically re-enable the interrupt when we write |
906 | * EICS */ | 905 | * EICS |
906 | */ | ||
907 | if (!igbvf_clean_tx_irq(tx_ring)) | 907 | if (!igbvf_clean_tx_irq(tx_ring)) |
908 | /* Ring was not completely cleaned, so fire another interrupt */ | 908 | /* Ring was not completely cleaned, so fire another interrupt */ |
909 | ew32(EICS, tx_ring->eims_value); | 909 | ew32(EICS, tx_ring->eims_value); |
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) | |||
941 | #define IGBVF_NO_QUEUE -1 | 941 | #define IGBVF_NO_QUEUE -1 |
942 | 942 | ||
943 | static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, | 943 | static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, |
944 | int tx_queue, int msix_vector) | 944 | int tx_queue, int msix_vector) |
945 | { | 945 | { |
946 | struct e1000_hw *hw = &adapter->hw; | 946 | struct e1000_hw *hw = &adapter->hw; |
947 | u32 ivar, index; | 947 | u32 ivar, index; |
948 | 948 | ||
949 | /* 82576 uses a table-based method for assigning vectors. | 949 | /* 82576 uses a table-based method for assigning vectors. |
950 | Each queue has a single entry in the table to which we write | 950 | * Each queue has a single entry in the table to which we write |
951 | a vector number along with a "valid" bit. Sadly, the layout | 951 | * a vector number along with a "valid" bit. Sadly, the layout |
952 | of the table is somewhat counterintuitive. */ | 952 | * of the table is somewhat counterintuitive. |
953 | */ | ||
953 | if (rx_queue > IGBVF_NO_QUEUE) { | 954 | if (rx_queue > IGBVF_NO_QUEUE) { |
954 | index = (rx_queue >> 1); | 955 | index = (rx_queue >> 1); |
955 | ivar = array_er32(IVAR0, index); | 956 | ivar = array_er32(IVAR0, index); |
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, | |||
984 | 985 | ||
985 | /** | 986 | /** |
986 | * igbvf_configure_msix - Configure MSI-X hardware | 987 | * igbvf_configure_msix - Configure MSI-X hardware |
988 | * @adapter: board private structure | ||
987 | * | 989 | * |
988 | * igbvf_configure_msix sets up the hardware to properly | 990 | * igbvf_configure_msix sets up the hardware to properly |
989 | * generate MSI-X interrupts. | 991 | * generate MSI-X interrupts. |
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) | |||
1027 | 1029 | ||
1028 | /** | 1030 | /** |
1029 | * igbvf_set_interrupt_capability - set MSI or MSI-X if supported | 1031 | * igbvf_set_interrupt_capability - set MSI or MSI-X if supported |
1032 | * @adapter: board private structure | ||
1030 | * | 1033 | * |
1031 | * Attempt to configure interrupts using the best available | 1034 | * Attempt to configure interrupts using the best available |
1032 | * capabilities of the hardware and kernel. | 1035 | * capabilities of the hardware and kernel. |
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) | |||
1036 | int err = -ENOMEM; | 1039 | int err = -ENOMEM; |
1037 | int i; | 1040 | int i; |
1038 | 1041 | ||
1039 | /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ | 1042 | /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ |
1040 | adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), | 1043 | adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), |
1041 | GFP_KERNEL); | 1044 | GFP_KERNEL); |
1042 | if (adapter->msix_entries) { | 1045 | if (adapter->msix_entries) { |
1043 | for (i = 0; i < 3; i++) | 1046 | for (i = 0; i < 3; i++) |
1044 | adapter->msix_entries[i].entry = i; | 1047 | adapter->msix_entries[i].entry = i; |
1045 | 1048 | ||
1046 | err = pci_enable_msix_range(adapter->pdev, | 1049 | err = pci_enable_msix_range(adapter->pdev, |
1047 | adapter->msix_entries, 3, 3); | 1050 | adapter->msix_entries, 3, 3); |
1048 | } | 1051 | } |
1049 | 1052 | ||
1050 | if (err < 0) { | 1053 | if (err < 0) { |
1051 | /* MSI-X failed */ | 1054 | /* MSI-X failed */ |
1052 | dev_err(&adapter->pdev->dev, | 1055 | dev_err(&adapter->pdev->dev, |
1053 | "Failed to initialize MSI-X interrupts.\n"); | 1056 | "Failed to initialize MSI-X interrupts.\n"); |
1054 | igbvf_reset_interrupt_capability(adapter); | 1057 | igbvf_reset_interrupt_capability(adapter); |
1055 | } | 1058 | } |
1056 | } | 1059 | } |
1057 | 1060 | ||
1058 | /** | 1061 | /** |
1059 | * igbvf_request_msix - Initialize MSI-X interrupts | 1062 | * igbvf_request_msix - Initialize MSI-X interrupts |
1063 | * @adapter: board private structure | ||
1060 | * | 1064 | * |
1061 | * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the | 1065 | * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the |
1062 | * kernel. | 1066 | * kernel. |
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1075 | } | 1079 | } |
1076 | 1080 | ||
1077 | err = request_irq(adapter->msix_entries[vector].vector, | 1081 | err = request_irq(adapter->msix_entries[vector].vector, |
1078 | igbvf_intr_msix_tx, 0, adapter->tx_ring->name, | 1082 | igbvf_intr_msix_tx, 0, adapter->tx_ring->name, |
1079 | netdev); | 1083 | netdev); |
1080 | if (err) | 1084 | if (err) |
1081 | goto out; | 1085 | goto out; |
1082 | 1086 | ||
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1085 | vector++; | 1089 | vector++; |
1086 | 1090 | ||
1087 | err = request_irq(adapter->msix_entries[vector].vector, | 1091 | err = request_irq(adapter->msix_entries[vector].vector, |
1088 | igbvf_intr_msix_rx, 0, adapter->rx_ring->name, | 1092 | igbvf_intr_msix_rx, 0, adapter->rx_ring->name, |
1089 | netdev); | 1093 | netdev); |
1090 | if (err) | 1094 | if (err) |
1091 | goto out; | 1095 | goto out; |
1092 | 1096 | ||
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1095 | vector++; | 1099 | vector++; |
1096 | 1100 | ||
1097 | err = request_irq(adapter->msix_entries[vector].vector, | 1101 | err = request_irq(adapter->msix_entries[vector].vector, |
1098 | igbvf_msix_other, 0, netdev->name, netdev); | 1102 | igbvf_msix_other, 0, netdev->name, netdev); |
1099 | if (err) | 1103 | if (err) |
1100 | goto out; | 1104 | goto out; |
1101 | 1105 | ||
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) | |||
1130 | 1134 | ||
1131 | /** | 1135 | /** |
1132 | * igbvf_request_irq - initialize interrupts | 1136 | * igbvf_request_irq - initialize interrupts |
1137 | * @adapter: board private structure | ||
1133 | * | 1138 | * |
1134 | * Attempts to configure interrupts using the best available | 1139 | * Attempts to configure interrupts using the best available |
1135 | * capabilities of the hardware and kernel. | 1140 | * capabilities of the hardware and kernel. |
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) | |||
1146 | return err; | 1151 | return err; |
1147 | 1152 | ||
1148 | dev_err(&adapter->pdev->dev, | 1153 | dev_err(&adapter->pdev->dev, |
1149 | "Unable to allocate interrupt, Error: %d\n", err); | 1154 | "Unable to allocate interrupt, Error: %d\n", err); |
1150 | 1155 | ||
1151 | return err; | 1156 | return err; |
1152 | } | 1157 | } |
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) | |||
1164 | 1169 | ||
1165 | /** | 1170 | /** |
1166 | * igbvf_irq_disable - Mask off interrupt generation on the NIC | 1171 | * igbvf_irq_disable - Mask off interrupt generation on the NIC |
1172 | * @adapter: board private structure | ||
1167 | **/ | 1173 | **/ |
1168 | static void igbvf_irq_disable(struct igbvf_adapter *adapter) | 1174 | static void igbvf_irq_disable(struct igbvf_adapter *adapter) |
1169 | { | 1175 | { |
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) | |||
1177 | 1183 | ||
1178 | /** | 1184 | /** |
1179 | * igbvf_irq_enable - Enable default interrupt generation settings | 1185 | * igbvf_irq_enable - Enable default interrupt generation settings |
1186 | * @adapter: board private structure | ||
1180 | **/ | 1187 | **/ |
1181 | static void igbvf_irq_enable(struct igbvf_adapter *adapter) | 1188 | static void igbvf_irq_enable(struct igbvf_adapter *adapter) |
1182 | { | 1189 | { |
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, | |||
1252 | 1259 | ||
1253 | if (hw->mac.ops.set_vfta(hw, vid, false)) { | 1260 | if (hw->mac.ops.set_vfta(hw, vid, false)) { |
1254 | dev_err(&adapter->pdev->dev, | 1261 | dev_err(&adapter->pdev->dev, |
1255 | "Failed to remove vlan id %d\n", vid); | 1262 | "Failed to remove vlan id %d\n", vid); |
1256 | return -EINVAL; | 1263 | return -EINVAL; |
1257 | } | 1264 | } |
1258 | clear_bit(vid, adapter->active_vlans); | 1265 | clear_bit(vid, adapter->active_vlans); |
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) | |||
1298 | 1305 | ||
1299 | /* Turn off Relaxed Ordering on head write-backs. The writebacks | 1306 | /* Turn off Relaxed Ordering on head write-backs. The writebacks |
1300 | * MUST be delivered in order or it will completely screw up | 1307 | * MUST be delivered in order or it will completely screw up |
1301 | * our bookeeping. | 1308 | * our bookkeeping. |
1302 | */ | 1309 | */ |
1303 | dca_txctrl = er32(DCA_TXCTRL(0)); | 1310 | dca_txctrl = er32(DCA_TXCTRL(0)); |
1304 | dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; | 1311 | dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; |
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) | |||
1325 | u32 srrctl = 0; | 1332 | u32 srrctl = 0; |
1326 | 1333 | ||
1327 | srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | | 1334 | srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | |
1328 | E1000_SRRCTL_BSIZEHDR_MASK | | 1335 | E1000_SRRCTL_BSIZEHDR_MASK | |
1329 | E1000_SRRCTL_BSIZEPKT_MASK); | 1336 | E1000_SRRCTL_BSIZEPKT_MASK); |
1330 | 1337 | ||
1331 | /* Enable queue drop to avoid head of line blocking */ | 1338 | /* Enable queue drop to avoid head of line blocking */ |
1332 | srrctl |= E1000_SRRCTL_DROP_EN; | 1339 | srrctl |= E1000_SRRCTL_DROP_EN; |
1333 | 1340 | ||
1334 | /* Setup buffer sizes */ | 1341 | /* Setup buffer sizes */ |
1335 | srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> | 1342 | srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> |
1336 | E1000_SRRCTL_BSIZEPKT_SHIFT; | 1343 | E1000_SRRCTL_BSIZEPKT_SHIFT; |
1337 | 1344 | ||
1338 | if (adapter->rx_buffer_len < 2048) { | 1345 | if (adapter->rx_buffer_len < 2048) { |
1339 | adapter->rx_ps_hdr_size = 0; | 1346 | adapter->rx_ps_hdr_size = 0; |
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) | |||
1341 | } else { | 1348 | } else { |
1342 | adapter->rx_ps_hdr_size = 128; | 1349 | adapter->rx_ps_hdr_size = 128; |
1343 | srrctl |= adapter->rx_ps_hdr_size << | 1350 | srrctl |= adapter->rx_ps_hdr_size << |
1344 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | 1351 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
1345 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | 1352 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; |
1346 | } | 1353 | } |
1347 | 1354 | ||
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) | |||
1369 | 1376 | ||
1370 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); | 1377 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); |
1371 | 1378 | ||
1372 | /* | 1379 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1373 | * Setup the HW Rx Head and Tail Descriptor Pointers and | ||
1374 | * the Base and Length of the Rx Descriptor Ring | 1380 | * the Base and Length of the Rx Descriptor Ring |
1375 | */ | 1381 | */ |
1376 | rdba = rx_ring->dma; | 1382 | rdba = rx_ring->dma; |
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) | |||
1441 | igbvf_setup_srrctl(adapter); | 1447 | igbvf_setup_srrctl(adapter); |
1442 | igbvf_configure_rx(adapter); | 1448 | igbvf_configure_rx(adapter); |
1443 | igbvf_alloc_rx_buffers(adapter->rx_ring, | 1449 | igbvf_alloc_rx_buffers(adapter->rx_ring, |
1444 | igbvf_desc_unused(adapter->rx_ring)); | 1450 | igbvf_desc_unused(adapter->rx_ring)); |
1445 | } | 1451 | } |
1446 | 1452 | ||
1447 | /* igbvf_reset - bring the hardware into a known good state | 1453 | /* igbvf_reset - bring the hardware into a known good state |
1454 | * @adapter: private board structure | ||
1448 | * | 1455 | * |
1449 | * This function boots the hardware and enables some settings that | 1456 | * This function boots the hardware and enables some settings that |
1450 | * require a configuration cycle of the hardware - those cannot be | 1457 | * require a configuration cycle of the hardware - those cannot be |
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) | |||
1494 | hw->mac.get_link_status = 1; | 1501 | hw->mac.get_link_status = 1; |
1495 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1502 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
1496 | 1503 | ||
1497 | |||
1498 | return 0; | 1504 | return 0; |
1499 | } | 1505 | } |
1500 | 1506 | ||
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) | |||
1504 | struct e1000_hw *hw = &adapter->hw; | 1510 | struct e1000_hw *hw = &adapter->hw; |
1505 | u32 rxdctl, txdctl; | 1511 | u32 rxdctl, txdctl; |
1506 | 1512 | ||
1507 | /* | 1513 | /* signal that we're down so the interrupt handler does not |
1508 | * signal that we're down so the interrupt handler does not | ||
1509 | * reschedule our watchdog timer | 1514 | * reschedule our watchdog timer |
1510 | */ | 1515 | */ |
1511 | set_bit(__IGBVF_DOWN, &adapter->state); | 1516 | set_bit(__IGBVF_DOWN, &adapter->state); |
@@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev) | |||
1662 | if (err) | 1667 | if (err) |
1663 | goto err_setup_rx; | 1668 | goto err_setup_rx; |
1664 | 1669 | ||
1665 | /* | 1670 | /* before we allocate an interrupt, we must be ready to handle it. |
1666 | * before we allocate an interrupt, we must be ready to handle it. | ||
1667 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 1671 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
1668 | * as soon as we call pci_request_irq, so we have to setup our | 1672 | * as soon as we call pci_request_irq, so we have to setup our |
1669 | * clean_rx handler before we do so. | 1673 | * clean_rx handler before we do so. |
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev) | |||
1725 | 1729 | ||
1726 | return 0; | 1730 | return 0; |
1727 | } | 1731 | } |
1732 | |||
1728 | /** | 1733 | /** |
1729 | * igbvf_set_mac - Change the Ethernet Address of the NIC | 1734 | * igbvf_set_mac - Change the Ethernet Address of the NIC |
1730 | * @netdev: network interface device structure | 1735 | * @netdev: network interface device structure |
@@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) | |||
1753 | return 0; | 1758 | return 0; |
1754 | } | 1759 | } |
1755 | 1760 | ||
1756 | #define UPDATE_VF_COUNTER(reg, name) \ | 1761 | #define UPDATE_VF_COUNTER(reg, name) \ |
1757 | { \ | 1762 | { \ |
1758 | u32 current_counter = er32(reg); \ | 1763 | u32 current_counter = er32(reg); \ |
1759 | if (current_counter < adapter->stats.last_##name) \ | 1764 | if (current_counter < adapter->stats.last_##name) \ |
1760 | adapter->stats.name += 0x100000000LL; \ | 1765 | adapter->stats.name += 0x100000000LL; \ |
1761 | adapter->stats.last_##name = current_counter; \ | 1766 | adapter->stats.last_##name = current_counter; \ |
1762 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ | 1767 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ |
1763 | adapter->stats.name |= current_counter; \ | 1768 | adapter->stats.name |= current_counter; \ |
1764 | } | 1769 | } |
1765 | 1770 | ||
1766 | /** | 1771 | /** |
1767 | * igbvf_update_stats - Update the board statistics counters | 1772 | * igbvf_update_stats - Update the board statistics counters |
@@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) | |||
1772 | struct e1000_hw *hw = &adapter->hw; | 1777 | struct e1000_hw *hw = &adapter->hw; |
1773 | struct pci_dev *pdev = adapter->pdev; | 1778 | struct pci_dev *pdev = adapter->pdev; |
1774 | 1779 | ||
1775 | /* | 1780 | /* Prevent stats update while adapter is being reset, link is down |
1776 | * Prevent stats update while adapter is being reset, link is down | ||
1777 | * or if the pci connection is down. | 1781 | * or if the pci connection is down. |
1778 | */ | 1782 | */ |
1779 | if (adapter->link_speed == 0) | 1783 | if (adapter->link_speed == 0) |
@@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) | |||
1832 | **/ | 1836 | **/ |
1833 | static void igbvf_watchdog(unsigned long data) | 1837 | static void igbvf_watchdog(unsigned long data) |
1834 | { | 1838 | { |
1835 | struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; | 1839 | struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; |
1836 | 1840 | ||
1837 | /* Do the rest outside of interrupt context */ | 1841 | /* Do the rest outside of interrupt context */ |
1838 | schedule_work(&adapter->watchdog_task); | 1842 | schedule_work(&adapter->watchdog_task); |
@@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data) | |||
1841 | static void igbvf_watchdog_task(struct work_struct *work) | 1845 | static void igbvf_watchdog_task(struct work_struct *work) |
1842 | { | 1846 | { |
1843 | struct igbvf_adapter *adapter = container_of(work, | 1847 | struct igbvf_adapter *adapter = container_of(work, |
1844 | struct igbvf_adapter, | 1848 | struct igbvf_adapter, |
1845 | watchdog_task); | 1849 | watchdog_task); |
1846 | struct net_device *netdev = adapter->netdev; | 1850 | struct net_device *netdev = adapter->netdev; |
1847 | struct e1000_mac_info *mac = &adapter->hw.mac; | 1851 | struct e1000_mac_info *mac = &adapter->hw.mac; |
1848 | struct igbvf_ring *tx_ring = adapter->tx_ring; | 1852 | struct igbvf_ring *tx_ring = adapter->tx_ring; |
@@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1855 | if (link) { | 1859 | if (link) { |
1856 | if (!netif_carrier_ok(netdev)) { | 1860 | if (!netif_carrier_ok(netdev)) { |
1857 | mac->ops.get_link_up_info(&adapter->hw, | 1861 | mac->ops.get_link_up_info(&adapter->hw, |
1858 | &adapter->link_speed, | 1862 | &adapter->link_speed, |
1859 | &adapter->link_duplex); | 1863 | &adapter->link_duplex); |
1860 | igbvf_print_link_info(adapter); | 1864 | igbvf_print_link_info(adapter); |
1861 | 1865 | ||
1862 | netif_carrier_on(netdev); | 1866 | netif_carrier_on(netdev); |
@@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1876 | igbvf_update_stats(adapter); | 1880 | igbvf_update_stats(adapter); |
1877 | } else { | 1881 | } else { |
1878 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < | 1882 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < |
1879 | tx_ring->count); | 1883 | tx_ring->count); |
1880 | if (tx_pending) { | 1884 | if (tx_pending) { |
1881 | /* | 1885 | /* We've lost link, so the controller stops DMA, |
1882 | * We've lost link, so the controller stops DMA, | ||
1883 | * but we've got queued Tx work that's never going | 1886 | * but we've got queued Tx work that's never going |
1884 | * to get done, so reset controller to flush Tx. | 1887 | * to get done, so reset controller to flush Tx. |
1885 | * (Do the reset outside of interrupt context). | 1888 | * (Do the reset outside of interrupt context). |
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1898 | round_jiffies(jiffies + (2 * HZ))); | 1901 | round_jiffies(jiffies + (2 * HZ))); |
1899 | } | 1902 | } |
1900 | 1903 | ||
1901 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 | 1904 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 |
1902 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 | 1905 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 |
1903 | #define IGBVF_TX_FLAGS_TSO 0x00000004 | 1906 | #define IGBVF_TX_FLAGS_TSO 0x00000004 |
1904 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 | 1907 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 |
1905 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 | 1908 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 |
1906 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 | 1909 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 |
1907 | 1910 | ||
1908 | static int igbvf_tso(struct igbvf_adapter *adapter, | 1911 | static int igbvf_tso(struct igbvf_adapter *adapter, |
1909 | struct igbvf_ring *tx_ring, | 1912 | struct igbvf_ring *tx_ring, |
1910 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, | 1913 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, |
1911 | __be16 protocol) | 1914 | __be16 protocol) |
1912 | { | 1915 | { |
@@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1930 | 1933 | ||
1931 | if (protocol == htons(ETH_P_IP)) { | 1934 | if (protocol == htons(ETH_P_IP)) { |
1932 | struct iphdr *iph = ip_hdr(skb); | 1935 | struct iphdr *iph = ip_hdr(skb); |
1936 | |||
1933 | iph->tot_len = 0; | 1937 | iph->tot_len = 0; |
1934 | iph->check = 0; | 1938 | iph->check = 0; |
1935 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 1939 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1936 | iph->daddr, 0, | 1940 | iph->daddr, 0, |
1937 | IPPROTO_TCP, | 1941 | IPPROTO_TCP, |
1938 | 0); | 1942 | 0); |
1939 | } else if (skb_is_gso_v6(skb)) { | 1943 | } else if (skb_is_gso_v6(skb)) { |
1940 | ipv6_hdr(skb)->payload_len = 0; | 1944 | ipv6_hdr(skb)->payload_len = 0; |
1941 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 1945 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
1942 | &ipv6_hdr(skb)->daddr, | 1946 | &ipv6_hdr(skb)->daddr, |
1943 | 0, IPPROTO_TCP, 0); | 1947 | 0, IPPROTO_TCP, 0); |
1944 | } | 1948 | } |
1945 | 1949 | ||
1946 | i = tx_ring->next_to_use; | 1950 | i = tx_ring->next_to_use; |
@@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1984 | } | 1988 | } |
1985 | 1989 | ||
1986 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | 1990 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, |
1987 | struct igbvf_ring *tx_ring, | 1991 | struct igbvf_ring *tx_ring, |
1988 | struct sk_buff *skb, u32 tx_flags, | 1992 | struct sk_buff *skb, u32 tx_flags, |
1989 | __be16 protocol) | 1993 | __be16 protocol) |
1990 | { | 1994 | { |
@@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | |||
2005 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); | 2009 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); |
2006 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 2010 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
2007 | info |= (skb_transport_header(skb) - | 2011 | info |= (skb_transport_header(skb) - |
2008 | skb_network_header(skb)); | 2012 | skb_network_header(skb)); |
2009 | |||
2010 | 2013 | ||
2011 | context_desc->vlan_macip_lens = cpu_to_le32(info); | 2014 | context_desc->vlan_macip_lens = cpu_to_le32(info); |
2012 | 2015 | ||
@@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | |||
2055 | 2058 | ||
2056 | netif_stop_queue(netdev); | 2059 | netif_stop_queue(netdev); |
2057 | 2060 | ||
2061 | /* Herbert's original patch had: | ||
2062 | * smp_mb__after_netif_stop_queue(); | ||
2063 | * but since that doesn't exist yet, just open code it. | ||
2064 | */ | ||
2058 | smp_mb(); | 2065 | smp_mb(); |
2059 | 2066 | ||
2060 | /* We need to check again just in case room has been made available */ | 2067 | /* We need to check again just in case room has been made available */ |
@@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | |||
2067 | return 0; | 2074 | return 0; |
2068 | } | 2075 | } |
2069 | 2076 | ||
2070 | #define IGBVF_MAX_TXD_PWR 16 | 2077 | #define IGBVF_MAX_TXD_PWR 16 |
2071 | #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) | 2078 | #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) |
2072 | 2079 | ||
2073 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | 2080 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, |
2074 | struct igbvf_ring *tx_ring, | 2081 | struct igbvf_ring *tx_ring, |
2075 | struct sk_buff *skb) | 2082 | struct sk_buff *skb) |
2076 | { | 2083 | { |
2077 | struct igbvf_buffer *buffer_info; | 2084 | struct igbvf_buffer *buffer_info; |
@@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2093 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 2100 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
2094 | goto dma_error; | 2101 | goto dma_error; |
2095 | 2102 | ||
2096 | |||
2097 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 2103 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
2098 | const struct skb_frag_struct *frag; | 2104 | const struct skb_frag_struct *frag; |
2099 | 2105 | ||
@@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2111 | buffer_info->time_stamp = jiffies; | 2117 | buffer_info->time_stamp = jiffies; |
2112 | buffer_info->mapped_as_page = true; | 2118 | buffer_info->mapped_as_page = true; |
2113 | buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, | 2119 | buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, |
2114 | DMA_TO_DEVICE); | 2120 | DMA_TO_DEVICE); |
2115 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 2121 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
2116 | goto dma_error; | 2122 | goto dma_error; |
2117 | } | 2123 | } |
@@ -2133,7 +2139,7 @@ dma_error: | |||
2133 | 2139 | ||
2134 | /* clear timestamp and dma mappings for remaining portion of packet */ | 2140 | /* clear timestamp and dma mappings for remaining portion of packet */ |
2135 | while (count--) { | 2141 | while (count--) { |
2136 | if (i==0) | 2142 | if (i == 0) |
2137 | i += tx_ring->count; | 2143 | i += tx_ring->count; |
2138 | i--; | 2144 | i--; |
2139 | buffer_info = &tx_ring->buffer_info[i]; | 2145 | buffer_info = &tx_ring->buffer_info[i]; |
@@ -2144,10 +2150,10 @@ dma_error: | |||
2144 | } | 2150 | } |
2145 | 2151 | ||
2146 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | 2152 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, |
2147 | struct igbvf_ring *tx_ring, | 2153 | struct igbvf_ring *tx_ring, |
2148 | int tx_flags, int count, | 2154 | int tx_flags, int count, |
2149 | unsigned int first, u32 paylen, | 2155 | unsigned int first, u32 paylen, |
2150 | u8 hdr_len) | 2156 | u8 hdr_len) |
2151 | { | 2157 | { |
2152 | union e1000_adv_tx_desc *tx_desc = NULL; | 2158 | union e1000_adv_tx_desc *tx_desc = NULL; |
2153 | struct igbvf_buffer *buffer_info; | 2159 | struct igbvf_buffer *buffer_info; |
@@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2155 | unsigned int i; | 2161 | unsigned int i; |
2156 | 2162 | ||
2157 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | | 2163 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | |
2158 | E1000_ADVTXD_DCMD_DEXT); | 2164 | E1000_ADVTXD_DCMD_DEXT); |
2159 | 2165 | ||
2160 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | 2166 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) |
2161 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; | 2167 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; |
@@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2182 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | 2188 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); |
2183 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 2189 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
2184 | tx_desc->read.cmd_type_len = | 2190 | tx_desc->read.cmd_type_len = |
2185 | cpu_to_le32(cmd_type_len | buffer_info->length); | 2191 | cpu_to_le32(cmd_type_len | buffer_info->length); |
2186 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | 2192 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
2187 | i++; | 2193 | i++; |
2188 | if (i == tx_ring->count) | 2194 | if (i == tx_ring->count) |
@@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2193 | /* Force memory writes to complete before letting h/w | 2199 | /* Force memory writes to complete before letting h/w |
2194 | * know there are new descriptors to fetch. (Only | 2200 | * know there are new descriptors to fetch. (Only |
2195 | * applicable for weak-ordered memory model archs, | 2201 | * applicable for weak-ordered memory model archs, |
2196 | * such as IA-64). */ | 2202 | * such as IA-64). |
2203 | */ | ||
2197 | wmb(); | 2204 | wmb(); |
2198 | 2205 | ||
2199 | tx_ring->buffer_info[first].next_to_watch = tx_desc; | 2206 | tx_ring->buffer_info[first].next_to_watch = tx_desc; |
2200 | tx_ring->next_to_use = i; | 2207 | tx_ring->next_to_use = i; |
2201 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 2208 | writel(i, adapter->hw.hw_addr + tx_ring->tail); |
2202 | /* we need this if more than one processor can write to our tail | 2209 | /* we need this if more than one processor can write to our tail |
2203 | * at a time, it syncronizes IO on IA64/Altix systems */ | 2210 | * at a time, it synchronizes IO on IA64/Altix systems |
2211 | */ | ||
2204 | mmiowb(); | 2212 | mmiowb(); |
2205 | } | 2213 | } |
2206 | 2214 | ||
@@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2225 | return NETDEV_TX_OK; | 2233 | return NETDEV_TX_OK; |
2226 | } | 2234 | } |
2227 | 2235 | ||
2228 | /* | 2236 | /* need: count + 4 desc gap to keep tail from touching |
2229 | * need: count + 4 desc gap to keep tail from touching | 2237 | * + 2 desc gap to keep tail from touching head, |
2230 | * + 2 desc gap to keep tail from touching head, | 2238 | * + 1 desc for skb->data, |
2231 | * + 1 desc for skb->data, | 2239 | * + 1 desc for context descriptor, |
2232 | * + 1 desc for context descriptor, | ||
2233 | * head, otherwise try next time | 2240 | * head, otherwise try next time |
2234 | */ | 2241 | */ |
2235 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { | 2242 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { |
@@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2258 | if (tso) | 2265 | if (tso) |
2259 | tx_flags |= IGBVF_TX_FLAGS_TSO; | 2266 | tx_flags |= IGBVF_TX_FLAGS_TSO; |
2260 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && | 2267 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && |
2261 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 2268 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
2262 | tx_flags |= IGBVF_TX_FLAGS_CSUM; | 2269 | tx_flags |= IGBVF_TX_FLAGS_CSUM; |
2263 | 2270 | ||
2264 | /* | 2271 | /* count reflects descriptors mapped, if 0 then mapping error |
2265 | * count reflects descriptors mapped, if 0 then mapping error | ||
2266 | * has occurred and we need to rewind the descriptor queue | 2272 | * has occurred and we need to rewind the descriptor queue |
2267 | */ | 2273 | */ |
2268 | count = igbvf_tx_map_adv(adapter, tx_ring, skb); | 2274 | count = igbvf_tx_map_adv(adapter, tx_ring, skb); |
@@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) | |||
2313 | static void igbvf_reset_task(struct work_struct *work) | 2319 | static void igbvf_reset_task(struct work_struct *work) |
2314 | { | 2320 | { |
2315 | struct igbvf_adapter *adapter; | 2321 | struct igbvf_adapter *adapter; |
2322 | |||
2316 | adapter = container_of(work, struct igbvf_adapter, reset_task); | 2323 | adapter = container_of(work, struct igbvf_adapter, reset_task); |
2317 | 2324 | ||
2318 | igbvf_reinit_locked(adapter); | 2325 | igbvf_reinit_locked(adapter); |
@@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
2356 | } | 2363 | } |
2357 | 2364 | ||
2358 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | 2365 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) |
2359 | msleep(1); | 2366 | usleep_range(1000, 2000); |
2360 | /* igbvf_down has a dependency on max_frame_size */ | 2367 | /* igbvf_down has a dependency on max_frame_size */ |
2361 | adapter->max_frame_size = max_frame; | 2368 | adapter->max_frame_size = max_frame; |
2362 | if (netif_running(netdev)) | 2369 | if (netif_running(netdev)) |
2363 | igbvf_down(adapter); | 2370 | igbvf_down(adapter); |
2364 | 2371 | ||
2365 | /* | 2372 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
2366 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
2367 | * means we reserve 2 more, this pushes us to allocate from the next | 2373 | * means we reserve 2 more, this pushes us to allocate from the next |
2368 | * larger slab size. | 2374 | * larger slab size. |
2369 | * i.e. RXBUFFER_2048 --> size-4096 slab | 2375 | * i.e. RXBUFFER_2048 --> size-4096 slab |
@@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
2382 | adapter->rx_buffer_len = PAGE_SIZE / 2; | 2388 | adapter->rx_buffer_len = PAGE_SIZE / 2; |
2383 | #endif | 2389 | #endif |
2384 | 2390 | ||
2385 | |||
2386 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 2391 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
2387 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | 2392 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || |
2388 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | 2393 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) |
2389 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + | 2394 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + |
2390 | ETH_FCS_LEN; | 2395 | ETH_FCS_LEN; |
2391 | 2396 | ||
2392 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", | 2397 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", |
2393 | netdev->mtu, new_mtu); | 2398 | netdev->mtu, new_mtu); |
2394 | netdev->mtu = new_mtu; | 2399 | netdev->mtu = new_mtu; |
2395 | 2400 | ||
2396 | if (netif_running(netdev)) | 2401 | if (netif_running(netdev)) |
@@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) | |||
2477 | } | 2482 | } |
2478 | 2483 | ||
2479 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2484 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2480 | /* | 2485 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
2481 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2482 | * without having to re-enable interrupts. It's not called while | 2486 | * without having to re-enable interrupts. It's not called while |
2483 | * the interrupt routine is executing. | 2487 | * the interrupt routine is executing. |
2484 | */ | 2488 | */ |
@@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev) | |||
2503 | * this device has been detected. | 2507 | * this device has been detected. |
2504 | */ | 2508 | */ |
2505 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, | 2509 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, |
2506 | pci_channel_state_t state) | 2510 | pci_channel_state_t state) |
2507 | { | 2511 | { |
2508 | struct net_device *netdev = pci_get_drvdata(pdev); | 2512 | struct net_device *netdev = pci_get_drvdata(pdev); |
2509 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2513 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
@@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) | |||
2583 | } | 2587 | } |
2584 | 2588 | ||
2585 | static int igbvf_set_features(struct net_device *netdev, | 2589 | static int igbvf_set_features(struct net_device *netdev, |
2586 | netdev_features_t features) | 2590 | netdev_features_t features) |
2587 | { | 2591 | { |
2588 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2592 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
2589 | 2593 | ||
@@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev, | |||
2596 | } | 2600 | } |
2597 | 2601 | ||
2598 | static const struct net_device_ops igbvf_netdev_ops = { | 2602 | static const struct net_device_ops igbvf_netdev_ops = { |
2599 | .ndo_open = igbvf_open, | 2603 | .ndo_open = igbvf_open, |
2600 | .ndo_stop = igbvf_close, | 2604 | .ndo_stop = igbvf_close, |
2601 | .ndo_start_xmit = igbvf_xmit_frame, | 2605 | .ndo_start_xmit = igbvf_xmit_frame, |
2602 | .ndo_get_stats = igbvf_get_stats, | 2606 | .ndo_get_stats = igbvf_get_stats, |
2603 | .ndo_set_rx_mode = igbvf_set_multi, | 2607 | .ndo_set_rx_mode = igbvf_set_multi, |
2604 | .ndo_set_mac_address = igbvf_set_mac, | 2608 | .ndo_set_mac_address = igbvf_set_mac, |
2605 | .ndo_change_mtu = igbvf_change_mtu, | 2609 | .ndo_change_mtu = igbvf_change_mtu, |
2606 | .ndo_do_ioctl = igbvf_ioctl, | 2610 | .ndo_do_ioctl = igbvf_ioctl, |
2607 | .ndo_tx_timeout = igbvf_tx_timeout, | 2611 | .ndo_tx_timeout = igbvf_tx_timeout, |
2608 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, | 2612 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, |
2609 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, | 2613 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, |
2610 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2614 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2611 | .ndo_poll_controller = igbvf_netpoll, | 2615 | .ndo_poll_controller = igbvf_netpoll, |
2612 | #endif | 2616 | #endif |
2613 | .ndo_set_features = igbvf_set_features, | 2617 | .ndo_set_features = igbvf_set_features, |
2614 | }; | 2618 | }; |
2615 | 2619 | ||
2616 | /** | 2620 | /** |
@@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2645 | } else { | 2649 | } else { |
2646 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | 2650 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
2647 | if (err) { | 2651 | if (err) { |
2648 | dev_err(&pdev->dev, "No usable DMA " | 2652 | dev_err(&pdev->dev, |
2649 | "configuration, aborting\n"); | 2653 | "No usable DMA configuration, aborting\n"); |
2650 | goto err_dma; | 2654 | goto err_dma; |
2651 | } | 2655 | } |
2652 | } | 2656 | } |
@@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2686 | 2690 | ||
2687 | err = -EIO; | 2691 | err = -EIO; |
2688 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), | 2692 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), |
2689 | pci_resource_len(pdev, 0)); | 2693 | pci_resource_len(pdev, 0)); |
2690 | 2694 | ||
2691 | if (!adapter->hw.hw_addr) | 2695 | if (!adapter->hw.hw_addr) |
2692 | goto err_ioremap; | 2696 | goto err_ioremap; |
@@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2712 | adapter->bd_number = cards_found++; | 2716 | adapter->bd_number = cards_found++; |
2713 | 2717 | ||
2714 | netdev->hw_features = NETIF_F_SG | | 2718 | netdev->hw_features = NETIF_F_SG | |
2715 | NETIF_F_IP_CSUM | | 2719 | NETIF_F_IP_CSUM | |
2716 | NETIF_F_IPV6_CSUM | | 2720 | NETIF_F_IPV6_CSUM | |
2717 | NETIF_F_TSO | | 2721 | NETIF_F_TSO | |
2718 | NETIF_F_TSO6 | | 2722 | NETIF_F_TSO6 | |
2719 | NETIF_F_RXCSUM; | 2723 | NETIF_F_RXCSUM; |
2720 | 2724 | ||
2721 | netdev->features = netdev->hw_features | | 2725 | netdev->features = netdev->hw_features | |
2722 | NETIF_F_HW_VLAN_CTAG_TX | | 2726 | NETIF_F_HW_VLAN_CTAG_TX | |
2723 | NETIF_F_HW_VLAN_CTAG_RX | | 2727 | NETIF_F_HW_VLAN_CTAG_RX | |
2724 | NETIF_F_HW_VLAN_CTAG_FILTER; | 2728 | NETIF_F_HW_VLAN_CTAG_FILTER; |
2725 | 2729 | ||
2726 | if (pci_using_dac) | 2730 | if (pci_using_dac) |
2727 | netdev->features |= NETIF_F_HIGHDMA; | 2731 | netdev->features |= NETIF_F_HIGHDMA; |
@@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2742 | if (err) | 2746 | if (err) |
2743 | dev_info(&pdev->dev, "Error reading MAC address.\n"); | 2747 | dev_info(&pdev->dev, "Error reading MAC address.\n"); |
2744 | else if (is_zero_ether_addr(adapter->hw.mac.addr)) | 2748 | else if (is_zero_ether_addr(adapter->hw.mac.addr)) |
2745 | dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); | 2749 | dev_info(&pdev->dev, |
2750 | "MAC address not assigned by administrator.\n"); | ||
2746 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, | 2751 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, |
2747 | netdev->addr_len); | 2752 | netdev->addr_len); |
2748 | } | 2753 | } |
@@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2751 | dev_info(&pdev->dev, "Assigning random MAC address.\n"); | 2756 | dev_info(&pdev->dev, "Assigning random MAC address.\n"); |
2752 | eth_hw_addr_random(netdev); | 2757 | eth_hw_addr_random(netdev); |
2753 | memcpy(adapter->hw.mac.addr, netdev->dev_addr, | 2758 | memcpy(adapter->hw.mac.addr, netdev->dev_addr, |
2754 | netdev->addr_len); | 2759 | netdev->addr_len); |
2755 | } | 2760 | } |
2756 | 2761 | ||
2757 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, | 2762 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, |
2758 | (unsigned long) adapter); | 2763 | (unsigned long)adapter); |
2759 | 2764 | ||
2760 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); | 2765 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); |
2761 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); | 2766 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); |
@@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev) | |||
2818 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2823 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
2819 | struct e1000_hw *hw = &adapter->hw; | 2824 | struct e1000_hw *hw = &adapter->hw; |
2820 | 2825 | ||
2821 | /* | 2826 | /* The watchdog timer may be rescheduled, so explicitly |
2822 | * The watchdog timer may be rescheduled, so explicitly | ||
2823 | * disable it from being rescheduled. | 2827 | * disable it from being rescheduled. |
2824 | */ | 2828 | */ |
2825 | set_bit(__IGBVF_DOWN, &adapter->state); | 2829 | set_bit(__IGBVF_DOWN, &adapter->state); |
@@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev) | |||
2832 | 2836 | ||
2833 | igbvf_reset_interrupt_capability(adapter); | 2837 | igbvf_reset_interrupt_capability(adapter); |
2834 | 2838 | ||
2835 | /* | 2839 | /* it is important to delete the NAPI struct prior to freeing the |
2836 | * it is important to delete the napi struct prior to freeing the | 2840 | * Rx ring so that you do not end up with null pointer refs |
2837 | * rx ring so that you do not end up with null pointer refs | ||
2838 | */ | 2841 | */ |
2839 | netif_napi_del(&adapter->rx_ring->napi); | 2842 | netif_napi_del(&adapter->rx_ring->napi); |
2840 | kfree(adapter->tx_ring); | 2843 | kfree(adapter->tx_ring); |
@@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); | |||
2866 | 2869 | ||
2867 | /* PCI Device API Driver */ | 2870 | /* PCI Device API Driver */ |
2868 | static struct pci_driver igbvf_driver = { | 2871 | static struct pci_driver igbvf_driver = { |
2869 | .name = igbvf_driver_name, | 2872 | .name = igbvf_driver_name, |
2870 | .id_table = igbvf_pci_tbl, | 2873 | .id_table = igbvf_pci_tbl, |
2871 | .probe = igbvf_probe, | 2874 | .probe = igbvf_probe, |
2872 | .remove = igbvf_remove, | 2875 | .remove = igbvf_remove, |
2873 | #ifdef CONFIG_PM | 2876 | #ifdef CONFIG_PM |
2874 | /* Power Management Hooks */ | 2877 | /* Power Management Hooks */ |
2875 | .suspend = igbvf_suspend, | 2878 | .suspend = igbvf_suspend, |
2876 | .resume = igbvf_resume, | 2879 | .resume = igbvf_resume, |
2877 | #endif | 2880 | #endif |
2878 | .shutdown = igbvf_shutdown, | 2881 | .shutdown = igbvf_shutdown, |
2879 | .err_handler = &igbvf_err_handler | 2882 | .err_handler = &igbvf_err_handler |
2880 | }; | 2883 | }; |
2881 | 2884 | ||
2882 | /** | 2885 | /** |
@@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = { | |||
2888 | static int __init igbvf_init_module(void) | 2891 | static int __init igbvf_init_module(void) |
2889 | { | 2892 | { |
2890 | int ret; | 2893 | int ret; |
2894 | |||
2891 | pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); | 2895 | pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); |
2892 | pr_info("%s\n", igbvf_copyright); | 2896 | pr_info("%s\n", igbvf_copyright); |
2893 | 2897 | ||
@@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void) | |||
2909 | } | 2913 | } |
2910 | module_exit(igbvf_exit_module); | 2914 | module_exit(igbvf_exit_module); |
2911 | 2915 | ||
2912 | |||
2913 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | 2916 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
2914 | MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); | 2917 | MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); |
2915 | MODULE_LICENSE("GPL"); | 2918 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341715dc..86a7c120b574 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -28,81 +27,81 @@ | |||
28 | #ifndef _E1000_REGS_H_ | 27 | #ifndef _E1000_REGS_H_ |
29 | #define _E1000_REGS_H_ | 28 | #define _E1000_REGS_H_ |
30 | 29 | ||
31 | #define E1000_CTRL 0x00000 /* Device Control - RW */ | 30 | #define E1000_CTRL 0x00000 /* Device Control - RW */ |
32 | #define E1000_STATUS 0x00008 /* Device Status - RO */ | 31 | #define E1000_STATUS 0x00008 /* Device Status - RO */ |
33 | #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ | 32 | #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ |
34 | #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ | 33 | #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ |
35 | #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) | 34 | #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) |
36 | #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ | 35 | #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ |
37 | #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ | 36 | #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ |
38 | #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ | 37 | #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ |
39 | #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ | 38 | #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ |
40 | #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ | 39 | #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ |
41 | #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ | 40 | #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ |
42 | #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ | 41 | #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ |
43 | /* | 42 | |
44 | * Convenience macros | 43 | /* Convenience macros |
45 | * | 44 | * |
46 | * Note: "_n" is the queue number of the register to be written to. | 45 | * Note: "_n" is the queue number of the register to be written to. |
47 | * | 46 | * |
48 | * Example usage: | 47 | * Example usage: |
49 | * E1000_RDBAL_REG(current_rx_queue) | 48 | * E1000_RDBAL_REG(current_rx_queue) |
50 | */ | 49 | */ |
51 | #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ | 50 | #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ |
52 | (0x0C000 + ((_n) * 0x40))) | 51 | (0x0C000 + ((_n) * 0x40))) |
53 | #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ | 52 | #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ |
54 | (0x0C004 + ((_n) * 0x40))) | 53 | (0x0C004 + ((_n) * 0x40))) |
55 | #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ | 54 | #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ |
56 | (0x0C008 + ((_n) * 0x40))) | 55 | (0x0C008 + ((_n) * 0x40))) |
57 | #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ | 56 | #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ |
58 | (0x0C00C + ((_n) * 0x40))) | 57 | (0x0C00C + ((_n) * 0x40))) |
59 | #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ | 58 | #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ |
60 | (0x0C010 + ((_n) * 0x40))) | 59 | (0x0C010 + ((_n) * 0x40))) |
61 | #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ | 60 | #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ |
62 | (0x0C018 + ((_n) * 0x40))) | 61 | (0x0C018 + ((_n) * 0x40))) |
63 | #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ | 62 | #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ |
64 | (0x0C028 + ((_n) * 0x40))) | 63 | (0x0C028 + ((_n) * 0x40))) |
65 | #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ | 64 | #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ |
66 | (0x0E000 + ((_n) * 0x40))) | 65 | (0x0E000 + ((_n) * 0x40))) |
67 | #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ | 66 | #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ |
68 | (0x0E004 + ((_n) * 0x40))) | 67 | (0x0E004 + ((_n) * 0x40))) |
69 | #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ | 68 | #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ |
70 | (0x0E008 + ((_n) * 0x40))) | 69 | (0x0E008 + ((_n) * 0x40))) |
71 | #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ | 70 | #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ |
72 | (0x0E010 + ((_n) * 0x40))) | 71 | (0x0E010 + ((_n) * 0x40))) |
73 | #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ | 72 | #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ |
74 | (0x0E018 + ((_n) * 0x40))) | 73 | (0x0E018 + ((_n) * 0x40))) |
75 | #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ | 74 | #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ |
76 | (0x0E028 + ((_n) * 0x40))) | 75 | (0x0E028 + ((_n) * 0x40))) |
77 | #define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) | 76 | #define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) |
78 | #define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) | 77 | #define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) |
79 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ | 78 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ |
80 | (0x054E0 + ((_i - 16) * 8))) | 79 | (0x054E0 + ((_i - 16) * 8))) |
81 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ | 80 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ |
82 | (0x054E4 + ((_i - 16) * 8))) | 81 | (0x054E4 + ((_i - 16) * 8))) |
83 | 82 | ||
84 | /* Statistics registers */ | 83 | /* Statistics registers */ |
85 | #define E1000_VFGPRC 0x00F10 | 84 | #define E1000_VFGPRC 0x00F10 |
86 | #define E1000_VFGORC 0x00F18 | 85 | #define E1000_VFGORC 0x00F18 |
87 | #define E1000_VFMPRC 0x00F3C | 86 | #define E1000_VFMPRC 0x00F3C |
88 | #define E1000_VFGPTC 0x00F14 | 87 | #define E1000_VFGPTC 0x00F14 |
89 | #define E1000_VFGOTC 0x00F34 | 88 | #define E1000_VFGOTC 0x00F34 |
90 | #define E1000_VFGOTLBC 0x00F50 | 89 | #define E1000_VFGOTLBC 0x00F50 |
91 | #define E1000_VFGPTLBC 0x00F44 | 90 | #define E1000_VFGPTLBC 0x00F44 |
92 | #define E1000_VFGORLBC 0x00F48 | 91 | #define E1000_VFGORLBC 0x00F48 |
93 | #define E1000_VFGPRLBC 0x00F40 | 92 | #define E1000_VFGPRLBC 0x00F40 |
94 | 93 | ||
95 | /* These act per VF so an array friendly macro is used */ | 94 | /* These act per VF so an array friendly macro is used */ |
96 | #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) | 95 | #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) |
97 | #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) | 96 | #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) |
98 | 97 | ||
99 | /* Define macros for handling registers */ | 98 | /* Define macros for handling registers */ |
100 | #define er32(reg) readl(hw->hw_addr + E1000_##reg) | 99 | #define er32(reg) readl(hw->hw_addr + E1000_##reg) |
101 | #define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) | 100 | #define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) |
102 | #define array_er32(reg, offset) \ | 101 | #define array_er32(reg, offset) \ |
103 | readl(hw->hw_addr + E1000_##reg + (offset << 2)) | 102 | readl(hw->hw_addr + E1000_##reg + (offset << 2)) |
104 | #define array_ew32(reg, offset, val) \ | 103 | #define array_ew32(reg, offset, val) \ |
105 | writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) | 104 | writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) |
106 | #define e1e_flush() er32(STATUS) | 105 | #define e1e_flush() er32(STATUS) |
107 | 106 | ||
108 | #endif | 107 | #endif |
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c2c534..a13baa90ae20 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -25,17 +24,16 @@ | |||
25 | 24 | ||
26 | *******************************************************************************/ | 25 | *******************************************************************************/ |
27 | 26 | ||
28 | |||
29 | #include "vf.h" | 27 | #include "vf.h" |
30 | 28 | ||
31 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw); | 29 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw); |
32 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | 30 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, |
33 | u16 *duplex); | 31 | u16 *duplex); |
34 | static s32 e1000_init_hw_vf(struct e1000_hw *hw); | 32 | static s32 e1000_init_hw_vf(struct e1000_hw *hw); |
35 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw); | 33 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw); |
36 | 34 | ||
37 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, | 35 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, |
38 | u32, u32, u32); | 36 | u32, u32, u32); |
39 | static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); | 37 | static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); |
40 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *); | 38 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *); |
41 | static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); | 39 | static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); |
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) | |||
94 | * the status register's data which is often stale and inaccurate. | 92 | * the status register's data which is often stale and inaccurate. |
95 | **/ | 93 | **/ |
96 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | 94 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, |
97 | u16 *duplex) | 95 | u16 *duplex) |
98 | { | 96 | { |
99 | s32 status; | 97 | s32 status; |
100 | 98 | ||
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
130 | u8 *addr = (u8 *)(&msgbuf[1]); | 128 | u8 *addr = (u8 *)(&msgbuf[1]); |
131 | u32 ctrl; | 129 | u32 ctrl; |
132 | 130 | ||
133 | /* assert vf queue/interrupt reset */ | 131 | /* assert VF queue/interrupt reset */ |
134 | ctrl = er32(CTRL); | 132 | ctrl = er32(CTRL); |
135 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 133 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
136 | 134 | ||
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
144 | /* mailbox timeout can now become active */ | 142 | /* mailbox timeout can now become active */ |
145 | mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; | 143 | mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; |
146 | 144 | ||
147 | /* notify pf of vf reset completion */ | 145 | /* notify PF of VF reset completion */ |
148 | msgbuf[0] = E1000_VF_RESET; | 146 | msgbuf[0] = E1000_VF_RESET; |
149 | mbx->ops.write_posted(hw, msgbuf, 1); | 147 | mbx->ops.write_posted(hw, msgbuf, 1); |
150 | 148 | ||
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
153 | /* set our "perm_addr" based on info provided by PF */ | 151 | /* set our "perm_addr" based on info provided by PF */ |
154 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | 152 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); |
155 | if (!ret_val) { | 153 | if (!ret_val) { |
156 | if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) | 154 | if (msgbuf[0] == (E1000_VF_RESET | |
155 | E1000_VT_MSGTYPE_ACK)) | ||
157 | memcpy(hw->mac.perm_addr, addr, ETH_ALEN); | 156 | memcpy(hw->mac.perm_addr, addr, ETH_ALEN); |
158 | else | 157 | else |
159 | ret_val = -E1000_ERR_MAC_INIT; | 158 | ret_val = -E1000_ERR_MAC_INIT; |
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
194 | /* Register count multiplied by bits per register */ | 193 | /* Register count multiplied by bits per register */ |
195 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | 194 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; |
196 | 195 | ||
197 | /* | 196 | /* The bit_shift is the number of left-shifts |
198 | * The bit_shift is the number of left-shifts | ||
199 | * where 0xFF would still fall within the hash mask. | 197 | * where 0xFF would still fall within the hash mask. |
200 | */ | 198 | */ |
201 | while (hash_mask >> bit_shift != 0xFF) | 199 | while (hash_mask >> bit_shift != 0xFF) |
202 | bit_shift++; | 200 | bit_shift++; |
203 | 201 | ||
204 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | | 202 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | |
205 | (((u16) mc_addr[5]) << bit_shift))); | 203 | (((u16)mc_addr[5]) << bit_shift))); |
206 | 204 | ||
207 | return hash_value; | 205 | return hash_value; |
208 | } | 206 | } |
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
221 | * unless there are workarounds that change this. | 219 | * unless there are workarounds that change this. |
222 | **/ | 220 | **/ |
223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 221 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
224 | u8 *mc_addr_list, u32 mc_addr_count, | 222 | u8 *mc_addr_list, u32 mc_addr_count, |
225 | u32 rar_used_count, u32 rar_count) | 223 | u32 rar_used_count, u32 rar_count) |
226 | { | 224 | { |
227 | struct e1000_mbx_info *mbx = &hw->mbx; | 225 | struct e1000_mbx_info *mbx = &hw->mbx; |
228 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; | 226 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; |
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) | |||
305 | * @addr: pointer to the receive address | 303 | * @addr: pointer to the receive address |
306 | * @index: receive address array register | 304 | * @index: receive address array register |
307 | **/ | 305 | **/ |
308 | static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) | 306 | static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) |
309 | { | 307 | { |
310 | struct e1000_mbx_info *mbx = &hw->mbx; | 308 | struct e1000_mbx_info *mbx = &hw->mbx; |
311 | u32 msgbuf[3]; | 309 | u32 msgbuf[3]; |
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) | |||
354 | s32 ret_val = E1000_SUCCESS; | 352 | s32 ret_val = E1000_SUCCESS; |
355 | u32 in_msg = 0; | 353 | u32 in_msg = 0; |
356 | 354 | ||
357 | /* | 355 | /* We only want to run this if there has been a rst asserted. |
358 | * We only want to run this if there has been a rst asserted. | ||
359 | * in this case that could mean a link change, device reset, | 356 | * in this case that could mean a link change, device reset, |
360 | * or a virtual function reset | 357 | * or a virtual function reset |
361 | */ | 358 | */ |
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) | |||
367 | if (!mac->get_link_status) | 364 | if (!mac->get_link_status) |
368 | goto out; | 365 | goto out; |
369 | 366 | ||
370 | /* if link status is down no point in checking to see if pf is up */ | 367 | /* if link status is down no point in checking to see if PF is up */ |
371 | if (!(er32(STATUS) & E1000_STATUS_LU)) | 368 | if (!(er32(STATUS) & E1000_STATUS_LU)) |
372 | goto out; | 369 | goto out; |
373 | 370 | ||
374 | /* if the read failed it could just be a mailbox collision, best wait | 371 | /* if the read failed it could just be a mailbox collision, best wait |
375 | * until we are called again and don't report an error */ | 372 | * until we are called again and don't report an error |
373 | */ | ||
376 | if (mbx->ops.read(hw, &in_msg, 1)) | 374 | if (mbx->ops.read(hw, &in_msg, 1)) |
377 | goto out; | 375 | goto out; |
378 | 376 | ||
379 | /* if incoming message isn't clear to send we are waiting on response */ | 377 | /* if incoming message isn't clear to send we are waiting on response */ |
380 | if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { | 378 | if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { |
381 | /* message is not CTS and is NACK we must have lost CTS status */ | 379 | /* msg is not CTS and is NACK we must have lost CTS status */ |
382 | if (in_msg & E1000_VT_MSGTYPE_NACK) | 380 | if (in_msg & E1000_VT_MSGTYPE_NACK) |
383 | ret_val = -E1000_ERR_MAC_INIT; | 381 | ret_val = -E1000_ERR_MAC_INIT; |
384 | goto out; | 382 | goto out; |
385 | } | 383 | } |
386 | 384 | ||
387 | /* the pf is talking, if we timed out in the past we reinit */ | 385 | /* the PF is talking, if we timed out in the past we reinit */ |
388 | if (!mbx->timeout) { | 386 | if (!mbx->timeout) { |
389 | ret_val = -E1000_ERR_MAC_INIT; | 387 | ret_val = -E1000_ERR_MAC_INIT; |
390 | goto out; | 388 | goto out; |
391 | } | 389 | } |
392 | 390 | ||
393 | /* if we passed all the tests above then the link is up and we no | 391 | /* if we passed all the tests above then the link is up and we no |
394 | * longer need to check for link */ | 392 | * longer need to check for link |
393 | */ | ||
395 | mac->get_link_status = false; | 394 | mac->get_link_status = false; |
396 | 395 | ||
397 | out: | 396 | out: |
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c68dfcd..0f1eca639f68 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -38,30 +37,29 @@ | |||
38 | 37 | ||
39 | struct e1000_hw; | 38 | struct e1000_hw; |
40 | 39 | ||
41 | #define E1000_DEV_ID_82576_VF 0x10CA | 40 | #define E1000_DEV_ID_82576_VF 0x10CA |
42 | #define E1000_DEV_ID_I350_VF 0x1520 | 41 | #define E1000_DEV_ID_I350_VF 0x1520 |
43 | #define E1000_REVISION_0 0 | 42 | #define E1000_REVISION_0 0 |
44 | #define E1000_REVISION_1 1 | 43 | #define E1000_REVISION_1 1 |
45 | #define E1000_REVISION_2 2 | 44 | #define E1000_REVISION_2 2 |
46 | #define E1000_REVISION_3 3 | 45 | #define E1000_REVISION_3 3 |
47 | #define E1000_REVISION_4 4 | 46 | #define E1000_REVISION_4 4 |
48 | 47 | ||
49 | #define E1000_FUNC_0 0 | 48 | #define E1000_FUNC_0 0 |
50 | #define E1000_FUNC_1 1 | 49 | #define E1000_FUNC_1 1 |
51 | 50 | ||
52 | /* | 51 | /* Receive Address Register Count |
53 | * Receive Address Register Count | ||
54 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | 52 | * Number of high/low register pairs in the RAR. The RAR (Receive Address |
55 | * Registers) holds the directed and multicast addresses that we monitor. | 53 | * Registers) holds the directed and multicast addresses that we monitor. |
56 | * These entries are also used for MAC-based filtering. | 54 | * These entries are also used for MAC-based filtering. |
57 | */ | 55 | */ |
58 | #define E1000_RAR_ENTRIES_VF 1 | 56 | #define E1000_RAR_ENTRIES_VF 1 |
59 | 57 | ||
60 | /* Receive Descriptor - Advanced */ | 58 | /* Receive Descriptor - Advanced */ |
61 | union e1000_adv_rx_desc { | 59 | union e1000_adv_rx_desc { |
62 | struct { | 60 | struct { |
63 | u64 pkt_addr; /* Packet buffer address */ | 61 | u64 pkt_addr; /* Packet buffer address */ |
64 | u64 hdr_addr; /* Header buffer address */ | 62 | u64 hdr_addr; /* Header buffer address */ |
65 | } read; | 63 | } read; |
66 | struct { | 64 | struct { |
67 | struct { | 65 | struct { |
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc { | |||
69 | u32 data; | 67 | u32 data; |
70 | struct { | 68 | struct { |
71 | u16 pkt_info; /* RSS/Packet type */ | 69 | u16 pkt_info; /* RSS/Packet type */ |
72 | u16 hdr_info; /* Split Header, | 70 | /* Split Header, hdr buffer length */ |
73 | * hdr buffer length */ | 71 | u16 hdr_info; |
74 | } hs_rss; | 72 | } hs_rss; |
75 | } lo_dword; | 73 | } lo_dword; |
76 | union { | 74 | union { |
77 | u32 rss; /* RSS Hash */ | 75 | u32 rss; /* RSS Hash */ |
78 | struct { | 76 | struct { |
79 | u16 ip_id; /* IP id */ | 77 | u16 ip_id; /* IP id */ |
80 | u16 csum; /* Packet Checksum */ | 78 | u16 csum; /* Packet Checksum */ |
81 | } csum_ip; | 79 | } csum_ip; |
82 | } hi_dword; | 80 | } hi_dword; |
83 | } lower; | 81 | } lower; |
84 | struct { | 82 | struct { |
85 | u32 status_error; /* ext status/error */ | 83 | u32 status_error; /* ext status/error */ |
86 | u16 length; /* Packet length */ | 84 | u16 length; /* Packet length */ |
87 | u16 vlan; /* VLAN tag */ | 85 | u16 vlan; /* VLAN tag */ |
88 | } upper; | 86 | } upper; |
89 | } wb; /* writeback */ | 87 | } wb; /* writeback */ |
90 | }; | 88 | }; |
91 | 89 | ||
92 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 | 90 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 |
93 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 | 91 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 |
94 | 92 | ||
95 | /* Transmit Descriptor - Advanced */ | 93 | /* Transmit Descriptor - Advanced */ |
96 | union e1000_adv_tx_desc { | 94 | union e1000_adv_tx_desc { |
97 | struct { | 95 | struct { |
98 | u64 buffer_addr; /* Address of descriptor's data buf */ | 96 | u64 buffer_addr; /* Address of descriptor's data buf */ |
99 | u32 cmd_type_len; | 97 | u32 cmd_type_len; |
100 | u32 olinfo_status; | 98 | u32 olinfo_status; |
101 | } read; | 99 | } read; |
102 | struct { | 100 | struct { |
103 | u64 rsvd; /* Reserved */ | 101 | u64 rsvd; /* Reserved */ |
104 | u32 nxtseq_seed; | 102 | u32 nxtseq_seed; |
105 | u32 status; | 103 | u32 status; |
106 | } wb; | 104 | } wb; |
107 | }; | 105 | }; |
108 | 106 | ||
109 | /* Adv Transmit Descriptor Config Masks */ | 107 | /* Adv Transmit Descriptor Config Masks */ |
110 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ | 108 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ |
111 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ | 109 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ |
112 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ | 110 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ |
113 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | 111 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ |
114 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ | 112 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ |
115 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ | 113 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ |
116 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ | 114 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ |
117 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ | 115 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ |
118 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ | 116 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ |
119 | 117 | ||
120 | /* Context descriptors */ | 118 | /* Context descriptors */ |
121 | struct e1000_adv_tx_context_desc { | 119 | struct e1000_adv_tx_context_desc { |
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { | |||
125 | u32 mss_l4len_idx; | 123 | u32 mss_l4len_idx; |
126 | }; | 124 | }; |
127 | 125 | ||
128 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ | 126 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ |
129 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ | 127 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ |
130 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ | 128 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ |
131 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ | 129 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ |
132 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ | 130 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ |
133 | 131 | ||
134 | enum e1000_mac_type { | 132 | enum e1000_mac_type { |
135 | e1000_undefined = 0, | 133 | e1000_undefined = 0, |
@@ -262,5 +260,4 @@ struct e1000_hw { | |||
262 | void e1000_rlpml_set_vf(struct e1000_hw *, u16); | 260 | void e1000_rlpml_set_vf(struct e1000_hw *, u16); |
263 | void e1000_init_function_pointers_vf(struct e1000_hw *hw); | 261 | void e1000_init_function_pointers_vf(struct e1000_hw *hw); |
264 | 262 | ||
265 | |||
266 | #endif /* _E1000_VF_H_ */ | 263 | #endif /* _E1000_VF_H_ */ |