author    | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2015-02-10 06:42:33 -0500
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2015-03-13 18:41:07 -0400
commit    | dec0d8e462322aec38990856aafb0cfbf686f4ff (patch)
tree      | 302e5e6879a1f14d837b24dc0f2497c6158ffebd /drivers/net/ethernet
parent    | 856f606ea9756d1222bbd137641024e29d9d6b43 (diff)
ixgbevf: Fix code comments and whitespace
Fix the code comments to align with the drivers/net/ commenting style,
and clean up whitespace issues. The whitespace fixes resolve checkpatch
errors such as lines exceeding 80 characters (except for strings) and
use tabs where possible.
CC: <kernel-team@fb.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
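For context, the drivers/net/ block-comment convention this patch converts to starts the comment text on the opening line and puts the closing marker on a line of its own, as several hunks below show. The sketch here is purely illustrative; ixgbevf_example_style is a made-up name and is not part of the driver:

```c
/* ixgbevf_example_style - illustrate the drivers/net comment convention
 *
 * The comment text begins on the same line as the opening marker and the
 * closing marker sits on its own line; the older style left the opening
 * line bare, which checkpatch flags for code under drivers/net.  Lines are
 * kept within 80 columns and indentation uses tabs.
 */
static inline int ixgbevf_example_style(void)
{
	return 0;	/* placeholder body; only the comment style matters here */
}
```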
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/defines.h      | 285
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ethtool.c      |  86
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  82
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 229
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/mbx.c          |  43
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/mbx.h          |  93
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/regs.h         | 105
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/vf.c           |  55
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/vf.h           |  15
9 files changed, 498 insertions, 495 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 7412d378b77b..770e21a64388 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,7 +1,7 @@
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2012 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -29,138 +28,138 @@
29 | #define _IXGBEVF_DEFINES_H_ | 28 | #define _IXGBEVF_DEFINES_H_ |
30 | 29 | ||
31 | /* Device IDs */ | 30 | /* Device IDs */ |
32 | #define IXGBE_DEV_ID_82599_VF 0x10ED | 31 | #define IXGBE_DEV_ID_82599_VF 0x10ED |
33 | #define IXGBE_DEV_ID_X540_VF 0x1515 | 32 | #define IXGBE_DEV_ID_X540_VF 0x1515 |
34 | #define IXGBE_DEV_ID_X550_VF 0x1565 | 33 | #define IXGBE_DEV_ID_X550_VF 0x1565 |
35 | #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 | 34 | #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 |
36 | 35 | ||
37 | #define IXGBE_VF_IRQ_CLEAR_MASK 7 | 36 | #define IXGBE_VF_IRQ_CLEAR_MASK 7 |
38 | #define IXGBE_VF_MAX_TX_QUEUES 8 | 37 | #define IXGBE_VF_MAX_TX_QUEUES 8 |
39 | #define IXGBE_VF_MAX_RX_QUEUES 8 | 38 | #define IXGBE_VF_MAX_RX_QUEUES 8 |
40 | 39 | ||
41 | /* DCB define */ | 40 | /* DCB define */ |
42 | #define IXGBE_VF_MAX_TRAFFIC_CLASS 8 | 41 | #define IXGBE_VF_MAX_TRAFFIC_CLASS 8 |
43 | 42 | ||
44 | /* Link speed */ | 43 | /* Link speed */ |
45 | typedef u32 ixgbe_link_speed; | 44 | typedef u32 ixgbe_link_speed; |
46 | #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 | 45 | #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 |
47 | #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 | 46 | #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 |
48 | #define IXGBE_LINK_SPEED_100_FULL 0x0008 | 47 | #define IXGBE_LINK_SPEED_100_FULL 0x0008 |
49 | 48 | ||
50 | #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ | 49 | #define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ |
51 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ | 50 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ |
52 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ | 51 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ |
53 | #define IXGBE_LINKS_UP 0x40000000 | 52 | #define IXGBE_LINKS_UP 0x40000000 |
54 | #define IXGBE_LINKS_SPEED_82599 0x30000000 | 53 | #define IXGBE_LINKS_SPEED_82599 0x30000000 |
55 | #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 | 54 | #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 |
56 | #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 | 55 | #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 |
57 | #define IXGBE_LINKS_SPEED_100_82599 0x10000000 | 56 | #define IXGBE_LINKS_SPEED_100_82599 0x10000000 |
58 | 57 | ||
59 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | 58 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ |
60 | #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 | 59 | #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 |
61 | #define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 | 60 | #define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 |
62 | #define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 | 61 | #define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 |
63 | 62 | ||
64 | /* Interrupt Vector Allocation Registers */ | 63 | /* Interrupt Vector Allocation Registers */ |
65 | #define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ | 64 | #define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ |
66 | 65 | ||
67 | #define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ | 66 | #define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ |
68 | 67 | ||
69 | /* Receive Config masks */ | 68 | /* Receive Config masks */ |
70 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ | 69 | #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ |
71 | #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ | 70 | #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ |
72 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ | 71 | #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ |
73 | #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ | 72 | #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ |
74 | #define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ | 73 | #define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ |
75 | #define IXGBE_RXDCTL_RLPML_EN 0x00008000 | 74 | #define IXGBE_RXDCTL_RLPML_EN 0x00008000 |
76 | 75 | ||
77 | /* DCA Control */ | 76 | /* DCA Control */ |
78 | #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ | 77 | #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ |
79 | 78 | ||
80 | /* PSRTYPE bit definitions */ | 79 | /* PSRTYPE bit definitions */ |
81 | #define IXGBE_PSRTYPE_TCPHDR 0x00000010 | 80 | #define IXGBE_PSRTYPE_TCPHDR 0x00000010 |
82 | #define IXGBE_PSRTYPE_UDPHDR 0x00000020 | 81 | #define IXGBE_PSRTYPE_UDPHDR 0x00000020 |
83 | #define IXGBE_PSRTYPE_IPV4HDR 0x00000100 | 82 | #define IXGBE_PSRTYPE_IPV4HDR 0x00000100 |
84 | #define IXGBE_PSRTYPE_IPV6HDR 0x00000200 | 83 | #define IXGBE_PSRTYPE_IPV6HDR 0x00000200 |
85 | #define IXGBE_PSRTYPE_L2HDR 0x00001000 | 84 | #define IXGBE_PSRTYPE_L2HDR 0x00001000 |
86 | 85 | ||
87 | /* SRRCTL bit definitions */ | 86 | /* SRRCTL bit definitions */ |
88 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ | 87 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ |
89 | #define IXGBE_SRRCTL_RDMTS_SHIFT 22 | 88 | #define IXGBE_SRRCTL_RDMTS_SHIFT 22 |
90 | #define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 | 89 | #define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 |
91 | #define IXGBE_SRRCTL_DROP_EN 0x10000000 | 90 | #define IXGBE_SRRCTL_DROP_EN 0x10000000 |
92 | #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F | 91 | #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F |
93 | #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 | 92 | #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 |
94 | #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 | 93 | #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 |
95 | #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 | 94 | #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 |
96 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 | 95 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 |
97 | #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 | 96 | #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 |
98 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 | 97 | #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 |
99 | #define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 | 98 | #define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 |
100 | 99 | ||
101 | /* Receive Descriptor bit definitions */ | 100 | /* Receive Descriptor bit definitions */ |
102 | #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ | 101 | #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ |
103 | #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ | 102 | #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ |
104 | #define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ | 103 | #define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ |
105 | #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | 104 | #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ |
106 | #define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ | 105 | #define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ |
107 | #define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 | 106 | #define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 |
108 | #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ | 107 | #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ |
109 | #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ | 108 | #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ |
110 | #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ | 109 | #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ |
111 | #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ | 110 | #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ |
112 | #define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ | 111 | #define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ |
113 | #define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ | 112 | #define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ |
114 | #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ | 113 | #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ |
115 | #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ | 114 | #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ |
116 | #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ | 115 | #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ |
117 | #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ | 116 | #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ |
118 | #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ | 117 | #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ |
119 | #define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ | 118 | #define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ |
120 | #define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ | 119 | #define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ |
121 | #define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ | 120 | #define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ |
122 | #define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ | 121 | #define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ |
123 | #define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ | 122 | #define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ |
124 | #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ | 123 | #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ |
125 | #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ | 124 | #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ |
126 | #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ | 125 | #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ |
127 | #define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ | 126 | #define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ |
128 | #define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ | 127 | #define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ |
129 | #define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ | 128 | #define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ |
130 | #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ | 129 | #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ |
131 | #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ | 130 | #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ |
132 | #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ | 131 | #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ |
133 | #define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ | 132 | #define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ |
134 | #define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ | 133 | #define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ |
135 | #define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ | 134 | #define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ |
136 | #define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ | 135 | #define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ |
137 | #define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | 136 | #define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ |
138 | #define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ | 137 | #define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ |
139 | #define IXGBE_RXD_PRI_SHIFT 13 | 138 | #define IXGBE_RXD_PRI_SHIFT 13 |
140 | #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ | 139 | #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ |
141 | #define IXGBE_RXD_CFI_SHIFT 12 | 140 | #define IXGBE_RXD_CFI_SHIFT 12 |
142 | 141 | ||
143 | #define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ | 142 | #define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ |
144 | #define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ | 143 | #define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ |
145 | #define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ | 144 | #define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ |
146 | #define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ | 145 | #define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ |
147 | #define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ | 146 | #define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ |
148 | #define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ | 147 | #define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ |
149 | #define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ | 148 | #define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ |
150 | #define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ | 149 | #define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ |
151 | #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ | 150 | #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ |
152 | #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ | 151 | #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ |
153 | #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ | 152 | #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ |
154 | 153 | ||
155 | #define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F | 154 | #define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F |
156 | #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 | 155 | #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 |
157 | #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 | 156 | #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 |
158 | #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 | 157 | #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 |
159 | #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 | 158 | #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 |
160 | #define IXGBE_RXDADV_RSCCNT_SHIFT 17 | 159 | #define IXGBE_RXDADV_RSCCNT_SHIFT 17 |
161 | #define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 | 160 | #define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 |
162 | #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 | 161 | #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 |
163 | #define IXGBE_RXDADV_SPH 0x8000 | 162 | #define IXGBE_RXDADV_SPH 0x8000 |
164 | 163 | ||
165 | #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ | 164 | #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ |
166 | IXGBE_RXD_ERR_CE | \ | 165 | IXGBE_RXD_ERR_CE | \ |
@@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed;
176 | IXGBE_RXDADV_ERR_OSE | \ | 175 | IXGBE_RXDADV_ERR_OSE | \ |
177 | IXGBE_RXDADV_ERR_USE) | 176 | IXGBE_RXDADV_ERR_USE) |
178 | 177 | ||
179 | #define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | 178 | #define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ |
180 | #define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | 179 | #define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ |
181 | #define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ | 180 | #define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ |
182 | #define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | 181 | #define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ |
183 | #define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ | 182 | #define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ |
184 | #define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ | 183 | #define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ |
185 | #define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | 184 | #define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ |
186 | #define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ | 185 | #define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ |
187 | #define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | 186 | #define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ |
188 | #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) | 187 | #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) |
189 | 188 | ||
190 | /* Transmit Descriptor - Advanced */ | 189 | /* Transmit Descriptor - Advanced */ |
191 | union ixgbe_adv_tx_desc { | 190 | union ixgbe_adv_tx_desc { |
@@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc {
241 | }; | 240 | }; |
242 | 241 | ||
243 | /* Adv Transmit Descriptor Config Masks */ | 242 | /* Adv Transmit Descriptor Config Masks */ |
244 | #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ | 243 | #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ |
245 | #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ | 244 | #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ |
246 | #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ | 245 | #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ |
247 | #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ | 246 | #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ |
248 | #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ | 247 | #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ |
249 | #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ | 248 | #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ |
250 | #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ | 249 | #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ |
251 | #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ | 250 | #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ |
252 | #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ | 251 | #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ |
253 | #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ | 252 | #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ |
254 | #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ | 253 | #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ |
255 | #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ | 254 | #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ |
256 | #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ | 255 | #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ |
257 | #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ | 256 | #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ |
258 | #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ | 257 | #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ |
259 | #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ | 258 | #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ |
260 | #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ | 259 | #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ |
261 | #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ | 260 | #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ |
262 | #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ | 261 | #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ |
263 | IXGBE_ADVTXD_POPTS_SHIFT) | 262 | IXGBE_ADVTXD_POPTS_SHIFT) |
264 | #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ | 263 | #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ |
265 | IXGBE_ADVTXD_POPTS_SHIFT) | 264 | IXGBE_ADVTXD_POPTS_SHIFT) |
266 | #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ | 265 | #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ |
267 | #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ | 266 | #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ |
268 | #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ | 267 | #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ |
269 | #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ | 268 | #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ |
270 | #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ | 269 | #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ |
271 | 270 | ||
272 | /* Interrupt register bitmasks */ | 271 | /* Interrupt register bitmasks */ |
273 | 272 | ||
274 | #define IXGBE_EITR_CNT_WDIS 0x80000000 | 273 | #define IXGBE_EITR_CNT_WDIS 0x80000000 |
275 | #define IXGBE_MAX_EITR 0x00000FF8 | 274 | #define IXGBE_MAX_EITR 0x00000FF8 |
276 | #define IXGBE_MIN_EITR 8 | 275 | #define IXGBE_MIN_EITR 8 |
277 | 276 | ||
278 | /* Error Codes */ | 277 | /* Error Codes */ |
279 | #define IXGBE_ERR_INVALID_MAC_ADDR -1 | 278 | #define IXGBE_ERR_INVALID_MAC_ADDR -1 |
280 | #define IXGBE_ERR_RESET_FAILED -2 | 279 | #define IXGBE_ERR_RESET_FAILED -2 |
281 | #define IXGBE_ERR_INVALID_ARGUMENT -3 | 280 | #define IXGBE_ERR_INVALID_ARGUMENT -3 |
282 | 281 | ||
283 | /* Transmit Config masks */ | 282 | /* Transmit Config masks */ |
284 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ | 283 | #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index cc0e5b7ff041..e83c85bf2602 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2014 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
100 | "Register test (offline)", | 99 | "Register test (offline)", |
101 | "Link test (on/offline)" | 100 | "Link test (on/offline)" |
102 | }; | 101 | }; |
102 | |||
103 | #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) | 103 | #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) |
104 | 104 | ||
105 | static int ixgbevf_get_settings(struct net_device *netdev, | 105 | static int ixgbevf_get_settings(struct net_device *netdev, |
@@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
120 | 120 | ||
121 | if (link_up) { | 121 | if (link_up) { |
122 | __u32 speed = SPEED_10000; | 122 | __u32 speed = SPEED_10000; |
123 | |||
123 | switch (link_speed) { | 124 | switch (link_speed) { |
124 | case IXGBE_LINK_SPEED_10GB_FULL: | 125 | case IXGBE_LINK_SPEED_10GB_FULL: |
125 | speed = SPEED_10000; | 126 | speed = SPEED_10000; |
@@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev,
145 | static u32 ixgbevf_get_msglevel(struct net_device *netdev) | 146 | static u32 ixgbevf_get_msglevel(struct net_device *netdev) |
146 | { | 147 | { |
147 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 148 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
149 | |||
148 | return adapter->msg_enable; | 150 | return adapter->msg_enable; |
149 | } | 151 | } |
150 | 152 | ||
151 | static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) | 153 | static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) |
152 | { | 154 | { |
153 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 155 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
156 | |||
154 | adapter->msg_enable = data; | 157 | adapter->msg_enable = data; |
155 | } | 158 | } |
156 | 159 | ||
@@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
185 | 188 | ||
186 | /* Interrupt */ | 189 | /* Interrupt */ |
187 | /* don't read EICR because it can clear interrupt causes, instead | 190 | /* don't read EICR because it can clear interrupt causes, instead |
188 | * read EICS which is a shadow but doesn't clear EICR */ | 191 | * read EICS which is a shadow but doesn't clear EICR |
192 | */ | ||
189 | regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); | 193 | regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); |
190 | regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); | 194 | regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); |
191 | regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); | 195 | regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); |
@@ -390,21 +394,21 @@ clear_reset:
390 | 394 | ||
391 | static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) | 395 | static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) |
392 | { | 396 | { |
393 | switch (stringset) { | 397 | switch (stringset) { |
394 | case ETH_SS_TEST: | 398 | case ETH_SS_TEST: |
395 | return IXGBE_TEST_LEN; | 399 | return IXGBE_TEST_LEN; |
396 | case ETH_SS_STATS: | 400 | case ETH_SS_STATS: |
397 | return IXGBE_GLOBAL_STATS_LEN; | 401 | return IXGBE_GLOBAL_STATS_LEN; |
398 | default: | 402 | default: |
399 | return -EINVAL; | 403 | return -EINVAL; |
400 | } | 404 | } |
401 | } | 405 | } |
402 | 406 | ||
403 | static void ixgbevf_get_ethtool_stats(struct net_device *netdev, | 407 | static void ixgbevf_get_ethtool_stats(struct net_device *netdev, |
404 | struct ethtool_stats *stats, u64 *data) | 408 | struct ethtool_stats *stats, u64 *data) |
405 | { | 409 | { |
406 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | 410 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); |
407 | char *base = (char *) adapter; | 411 | char *base = (char *)adapter; |
408 | int i; | 412 | int i; |
409 | #ifdef BP_EXTENDED_STATS | 413 | #ifdef BP_EXTENDED_STATS |
410 | u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, | 414 | u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, |
@@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
594 | } | 598 | } |
595 | test = reg_test_vf; | 599 | test = reg_test_vf; |
596 | 600 | ||
597 | /* | 601 | /* Perform the register test, looping through the test table |
598 | * Perform the register test, looping through the test table | ||
599 | * until we either fail or reach the null entry. | 602 | * until we either fail or reach the null entry. |
600 | */ | 603 | */ |
601 | while (test->reg) { | 604 | while (test->reg) { |
@@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
617 | break; | 620 | break; |
618 | case WRITE_NO_TEST: | 621 | case WRITE_NO_TEST: |
619 | ixgbe_write_reg(&adapter->hw, | 622 | ixgbe_write_reg(&adapter->hw, |
620 | test->reg + (i * 0x40), | 623 | test->reg + (i * 0x40), |
621 | test->write); | 624 | test->write); |
622 | break; | 625 | break; |
623 | case TABLE32_TEST: | 626 | case TABLE32_TEST: |
624 | b = reg_pattern_test(adapter, data, | 627 | b = reg_pattern_test(adapter, data, |
@@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev,
670 | hw_dbg(&adapter->hw, "offline testing starting\n"); | 673 | hw_dbg(&adapter->hw, "offline testing starting\n"); |
671 | 674 | ||
672 | /* Link test performed before hardware reset so autoneg doesn't | 675 | /* Link test performed before hardware reset so autoneg doesn't |
673 | * interfere with test result */ | 676 | * interfere with test result |
677 | */ | ||
674 | if (ixgbevf_link_test(adapter, &data[1])) | 678 | if (ixgbevf_link_test(adapter, &data[1])) |
675 | eth_test->flags |= ETH_TEST_FL_FAILED; | 679 | eth_test->flags |= ETH_TEST_FL_FAILED; |
676 | 680 | ||
@@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev,
724 | else | 728 | else |
725 | ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; | 729 | ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; |
726 | 730 | ||
727 | /* if in mixed tx/rx queues per vector mode, report only rx settings */ | 731 | /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ |
728 | if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) | 732 | if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) |
729 | return 0; | 733 | return 0; |
730 | 734 | ||
@@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
745 | int num_vectors, i; | 749 | int num_vectors, i; |
746 | u16 tx_itr_param, rx_itr_param; | 750 | u16 tx_itr_param, rx_itr_param; |
747 | 751 | ||
748 | /* don't accept tx specific changes if we've got mixed RxTx vectors */ | 752 | /* don't accept Tx specific changes if we've got mixed RxTx vectors */ |
749 | if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count | 753 | if (adapter->q_vector[0]->tx.count && |
750 | && ec->tx_coalesce_usecs) | 754 | adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) |
751 | return -EINVAL; | 755 | return -EINVAL; |
752 | 756 | ||
753 | |||
754 | if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || | 757 | if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || |
755 | (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) | 758 | (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) |
756 | return -EINVAL; | 759 | return -EINVAL; |
@@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
765 | else | 768 | else |
766 | rx_itr_param = adapter->rx_itr_setting; | 769 | rx_itr_param = adapter->rx_itr_setting; |
767 | 770 | ||
768 | |||
769 | if (ec->tx_coalesce_usecs > 1) | 771 | if (ec->tx_coalesce_usecs > 1) |
770 | adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; | 772 | adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; |
771 | else | 773 | else |
@@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
781 | for (i = 0; i < num_vectors; i++) { | 783 | for (i = 0; i < num_vectors; i++) { |
782 | q_vector = adapter->q_vector[i]; | 784 | q_vector = adapter->q_vector[i]; |
783 | if (q_vector->tx.count && !q_vector->rx.count) | 785 | if (q_vector->tx.count && !q_vector->rx.count) |
784 | /* tx only */ | 786 | /* Tx only */ |
785 | q_vector->itr = tx_itr_param; | 787 | q_vector->itr = tx_itr_param; |
786 | else | 788 | else |
787 | /* rx only or mixed */ | 789 | /* Rx only or mixed */ |
788 | q_vector->itr = rx_itr_param; | 790 | q_vector->itr = rx_itr_param; |
789 | ixgbevf_write_eitr(q_vector); | 791 | ixgbevf_write_eitr(q_vector); |
790 | } | 792 | } |
@@ -793,22 +795,22 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
793 | } | 795 | } |
794 | 796 | ||
795 | static const struct ethtool_ops ixgbevf_ethtool_ops = { | 797 | static const struct ethtool_ops ixgbevf_ethtool_ops = { |
796 | .get_settings = ixgbevf_get_settings, | 798 | .get_settings = ixgbevf_get_settings, |
797 | .get_drvinfo = ixgbevf_get_drvinfo, | 799 | .get_drvinfo = ixgbevf_get_drvinfo, |
798 | .get_regs_len = ixgbevf_get_regs_len, | 800 | .get_regs_len = ixgbevf_get_regs_len, |
799 | .get_regs = ixgbevf_get_regs, | 801 | .get_regs = ixgbevf_get_regs, |
800 | .nway_reset = ixgbevf_nway_reset, | 802 | .nway_reset = ixgbevf_nway_reset, |
801 | .get_link = ethtool_op_get_link, | 803 | .get_link = ethtool_op_get_link, |
802 | .get_ringparam = ixgbevf_get_ringparam, | 804 | .get_ringparam = ixgbevf_get_ringparam, |
803 | .set_ringparam = ixgbevf_set_ringparam, | 805 | .set_ringparam = ixgbevf_set_ringparam, |
804 | .get_msglevel = ixgbevf_get_msglevel, | 806 | .get_msglevel = ixgbevf_get_msglevel, |
805 | .set_msglevel = ixgbevf_set_msglevel, | 807 | .set_msglevel = ixgbevf_set_msglevel, |
806 | .self_test = ixgbevf_diag_test, | 808 | .self_test = ixgbevf_diag_test, |
807 | .get_sset_count = ixgbevf_get_sset_count, | 809 | .get_sset_count = ixgbevf_get_sset_count, |
808 | .get_strings = ixgbevf_get_strings, | 810 | .get_strings = ixgbevf_get_strings, |
809 | .get_ethtool_stats = ixgbevf_get_ethtool_stats, | 811 | .get_ethtool_stats = ixgbevf_get_ethtool_stats, |
810 | .get_coalesce = ixgbevf_get_coalesce, | 812 | .get_coalesce = ixgbevf_get_coalesce, |
811 | .set_coalesce = ixgbevf_set_coalesce, | 813 | .set_coalesce = ixgbevf_set_coalesce, |
812 | }; | 814 | }; |
813 | 815 | ||
814 | void ixgbevf_set_ethtool_ops(struct net_device *netdev) | 816 | void ixgbevf_set_ethtool_ops(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 3a9b356dff01..bc939a1fcb3c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2014 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -51,7 +50,8 @@
51 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) | 50 | #define DESC_NEEDED (MAX_SKB_FRAGS + 4) |
52 | 51 | ||
53 | /* wrapper around a pointer to a socket buffer, | 52 | /* wrapper around a pointer to a socket buffer, |
54 | * so a DMA handle can be stored along with the buffer */ | 53 | * so a DMA handle can be stored along with the buffer |
54 | */ | ||
55 | struct ixgbevf_tx_buffer { | 55 | struct ixgbevf_tx_buffer { |
56 | union ixgbe_adv_tx_desc *next_to_watch; | 56 | union ixgbe_adv_tx_desc *next_to_watch; |
57 | unsigned long time_stamp; | 57 | unsigned long time_stamp; |
@@ -132,9 +132,10 @@ struct ixgbevf_ring {
132 | u8 __iomem *tail; | 132 | u8 __iomem *tail; |
133 | struct sk_buff *skb; | 133 | struct sk_buff *skb; |
134 | 134 | ||
135 | u16 reg_idx; /* holds the special value that gets the hardware register | 135 | /* holds the special value that gets the hardware register offset |
136 | * offset associated with this ring, which is different | 136 | * associated with this ring, which is different for DCB and RSS modes |
137 | * for DCB and RSS modes */ | 137 | */ |
138 | u16 reg_idx; | ||
138 | int queue_index; /* needed for multiqueue queue management */ | 139 | int queue_index; /* needed for multiqueue queue management */ |
139 | }; | 140 | }; |
140 | 141 | ||
@@ -143,21 +144,21 @@ struct ixgbevf_ring {
143 | 144 | ||
144 | #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES | 145 | #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES |
145 | #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES | 146 | #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES |
146 | #define IXGBEVF_MAX_RSS_QUEUES 2 | 147 | #define IXGBEVF_MAX_RSS_QUEUES 2 |
147 | 148 | ||
148 | #define IXGBEVF_DEFAULT_TXD 1024 | 149 | #define IXGBEVF_DEFAULT_TXD 1024 |
149 | #define IXGBEVF_DEFAULT_RXD 512 | 150 | #define IXGBEVF_DEFAULT_RXD 512 |
150 | #define IXGBEVF_MAX_TXD 4096 | 151 | #define IXGBEVF_MAX_TXD 4096 |
151 | #define IXGBEVF_MIN_TXD 64 | 152 | #define IXGBEVF_MIN_TXD 64 |
152 | #define IXGBEVF_MAX_RXD 4096 | 153 | #define IXGBEVF_MAX_RXD 4096 |
153 | #define IXGBEVF_MIN_RXD 64 | 154 | #define IXGBEVF_MIN_RXD 64 |
154 | 155 | ||
155 | /* Supported Rx Buffer Sizes */ | 156 | /* Supported Rx Buffer Sizes */ |
156 | #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ | 157 | #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ |
157 | #define IXGBEVF_RXBUFFER_2048 2048 | 158 | #define IXGBEVF_RXBUFFER_2048 2048 |
158 | 159 | ||
159 | #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 | 160 | #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 |
160 | #define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 | 161 | #define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 |
161 | 162 | ||
162 | #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) | 163 | #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) |
163 | 164 | ||
@@ -186,10 +187,11 @@ struct ixgbevf_ring_container {
186 | */ | 187 | */ |
187 | struct ixgbevf_q_vector { | 188 | struct ixgbevf_q_vector { |
188 | struct ixgbevf_adapter *adapter; | 189 | struct ixgbevf_adapter *adapter; |
189 | u16 v_idx; /* index of q_vector within array, also used for | 190 | /* index of q_vector within array, also used for finding the bit in |
190 | * finding the bit in EICR and friends that | 191 | * EICR and friends that represents the vector for this ring |
191 | * represents the vector for this ring */ | 192 | */ |
192 | u16 itr; /* Interrupt throttle rate written to EITR */ | 193 | u16 v_idx; |
194 | u16 itr; /* Interrupt throttle rate written to EITR */ | ||
193 | struct napi_struct napi; | 195 | struct napi_struct napi; |
194 | struct ixgbevf_ring_container rx, tx; | 196 | struct ixgbevf_ring_container rx, tx; |
195 | char name[IFNAMSIZ + 9]; | 197 | char name[IFNAMSIZ + 9]; |
@@ -199,19 +201,21 @@ struct ixgbevf_q_vector {
199 | #define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ | 201 | #define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ |
200 | #define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ | 202 | #define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ |
201 | #define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ | 203 | #define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ |
202 | #define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) | 204 | #define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) |
203 | #define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) | 205 | #define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) |
204 | #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ | 206 | #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ |
205 | #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ | 207 | #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ |
206 | #define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) | 208 | #define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ |
207 | #define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) | 209 | IXGBEVF_QV_STATE_POLL_YIELD) |
210 | #define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ | ||
211 | IXGBEVF_QV_STATE_POLL_YIELD) | ||
208 | spinlock_t lock; | 212 | spinlock_t lock; |
209 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | 213 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
210 | }; | 214 | }; |
215 | |||
211 | #ifdef CONFIG_NET_RX_BUSY_POLL | 216 | #ifdef CONFIG_NET_RX_BUSY_POLL |
212 | static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) | 217 | static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) |
213 | { | 218 | { |
214 | |||
215 | spin_lock_init(&q_vector->lock); | 219 | spin_lock_init(&q_vector->lock); |
216 | q_vector->state = IXGBEVF_QV_STATE_IDLE; | 220 | q_vector->state = IXGBEVF_QV_STATE_IDLE; |
217 | } | 221 | } |
@@ -220,6 +224,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
220 | static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) | 224 | static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) |
221 | { | 225 | { |
222 | int rc = true; | 226 | int rc = true; |
227 | |||
223 | spin_lock_bh(&q_vector->lock); | 228 | spin_lock_bh(&q_vector->lock); |
224 | if (q_vector->state & IXGBEVF_QV_LOCKED) { | 229 | if (q_vector->state & IXGBEVF_QV_LOCKED) { |
225 | WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); | 230 | WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); |
@@ -240,6 +245,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
240 | static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) | 245 | static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) |
241 | { | 246 | { |
242 | int rc = false; | 247 | int rc = false; |
248 | |||
243 | spin_lock_bh(&q_vector->lock); | 249 | spin_lock_bh(&q_vector->lock); |
244 | WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | | 250 | WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | |
245 | IXGBEVF_QV_STATE_NAPI_YIELD)); | 251 | IXGBEVF_QV_STATE_NAPI_YIELD)); |
@@ -256,6 +262,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
256 | static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) | 262 | static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) |
257 | { | 263 | { |
258 | int rc = true; | 264 | int rc = true; |
265 | |||
259 | spin_lock_bh(&q_vector->lock); | 266 | spin_lock_bh(&q_vector->lock); |
260 | if ((q_vector->state & IXGBEVF_QV_LOCKED)) { | 267 | if ((q_vector->state & IXGBEVF_QV_LOCKED)) { |
261 | q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; | 268 | q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; |
@@ -275,6 +282,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
275 | static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) | 282 | static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) |
276 | { | 283 | { |
277 | int rc = false; | 284 | int rc = false; |
285 | |||
278 | spin_lock_bh(&q_vector->lock); | 286 | spin_lock_bh(&q_vector->lock); |
279 | WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); | 287 | WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); |
280 | 288 | ||
@@ -297,6 +305,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
297 | static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) | 305 | static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) |
298 | { | 306 | { |
299 | int rc = true; | 307 | int rc = true; |
308 | |||
300 | spin_lock_bh(&q_vector->lock); | 309 | spin_lock_bh(&q_vector->lock); |
301 | if (q_vector->state & IXGBEVF_QV_OWNED) | 310 | if (q_vector->state & IXGBEVF_QV_OWNED) |
302 | rc = false; | 311 | rc = false; |
@@ -307,8 +316,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
307 | 316 | ||
308 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | 317 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
309 | 318 | ||
310 | /* | 319 | /* microsecond values for various ITR rates shifted by 2 to fit itr register |
311 | * microsecond values for various ITR rates shifted by 2 to fit itr register | ||
312 | * with the first 3 bits reserved 0 | 320 | * with the first 3 bits reserved 0 |
313 | */ | 321 | */ |
314 | #define IXGBE_MIN_RSC_ITR 24 | 322 | #define IXGBE_MIN_RSC_ITR 24 |
@@ -345,22 +353,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
345 | writel(value, ring->tail); | 353 | writel(value, ring->tail); |
346 | } | 354 | } |
347 | 355 | ||
348 | #define IXGBEVF_RX_DESC(R, i) \ | 356 | #define IXGBEVF_RX_DESC(R, i) \ |
349 | (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) | 357 | (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) |
350 | #define IXGBEVF_TX_DESC(R, i) \ | 358 | #define IXGBEVF_TX_DESC(R, i) \ |
351 | (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) | 359 | (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) |
352 | #define IXGBEVF_TX_CTXTDESC(R, i) \ | 360 | #define IXGBEVF_TX_CTXTDESC(R, i) \ |
353 | (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) | 361 | (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) |
354 | 362 | ||
355 | #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ | 363 | #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ |
356 | 364 | ||
357 | #define OTHER_VECTOR 1 | 365 | #define OTHER_VECTOR 1 |
358 | #define NON_Q_VECTORS (OTHER_VECTOR) | 366 | #define NON_Q_VECTORS (OTHER_VECTOR) |
359 | 367 | ||
360 | #define MAX_MSIX_Q_VECTORS 2 | 368 | #define MAX_MSIX_Q_VECTORS 2 |
361 | 369 | ||
362 | #define MIN_MSIX_Q_VECTORS 1 | 370 | #define MIN_MSIX_Q_VECTORS 1 |
363 | #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) | 371 | #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) |
364 | 372 | ||
365 | /* board specific private data structure */ | 373 | /* board specific private data structure */ |
366 | struct ixgbevf_adapter { | 374 | struct ixgbevf_adapter { |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4186981e562d..4ee15adb3bd9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2014 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -25,7 +24,6 @@
25 | 24 | ||
26 | *******************************************************************************/ | 25 | *******************************************************************************/ |
27 | 26 | ||
28 | |||
29 | /****************************************************************************** | 27 | /****************************************************************************** |
30 | Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code | 28 | Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code |
31 | ******************************************************************************/ | 29 | ******************************************************************************/ |
@@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
170 | * @direction: 0 for Rx, 1 for Tx, -1 for other causes | 168 | * @direction: 0 for Rx, 1 for Tx, -1 for other causes |
171 | * @queue: queue to map the corresponding interrupt to | 169 | * @queue: queue to map the corresponding interrupt to |
172 | * @msix_vector: the vector to map to the corresponding queue | 170 | * @msix_vector: the vector to map to the corresponding queue |
173 | */ | 171 | **/ |
174 | static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, | 172 | static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, |
175 | u8 queue, u8 msix_vector) | 173 | u8 queue, u8 msix_vector) |
176 | { | 174 | { |
177 | u32 ivar, index; | 175 | u32 ivar, index; |
178 | struct ixgbe_hw *hw = &adapter->hw; | 176 | struct ixgbe_hw *hw = &adapter->hw; |
177 | |||
179 | if (direction == -1) { | 178 | if (direction == -1) { |
180 | /* other causes */ | 179 | /* other causes */ |
181 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | 180 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
@@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
184 | ivar |= msix_vector; | 183 | ivar |= msix_vector; |
185 | IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); | 184 | IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); |
186 | } else { | 185 | } else { |
187 | /* tx or rx causes */ | 186 | /* Tx or Rx causes */ |
188 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | 187 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
189 | index = ((16 * (queue & 1)) + (8 * direction)); | 188 | index = ((16 * (queue & 1)) + (8 * direction)); |
190 | ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); | 189 | ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); |
@@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
458 | napi_gro_receive(&q_vector->napi, skb); | 457 | napi_gro_receive(&q_vector->napi, skb); |
459 | } | 458 | } |
460 | 459 | ||
461 | /* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum | 460 | /** |
461 | * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum | ||
462 | * @ring: structure containig ring specific data | 462 | * @ring: structure containig ring specific data |
463 | * @rx_desc: current Rx descriptor being processed | 463 | * @rx_desc: current Rx descriptor being processed |
464 | * @skb: skb currently being received and modified | 464 | * @skb: skb currently being received and modified |
465 | */ | 465 | **/ |
466 | static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, | 466 | static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, |
467 | union ixgbe_adv_rx_desc *rx_desc, | 467 | union ixgbe_adv_rx_desc *rx_desc, |
468 | struct sk_buff *skb) | 468 | struct sk_buff *skb) |
@@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
492 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 492 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
493 | } | 493 | } |
494 | 494 | ||
495 | /* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor | 495 | /** |
496 | * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor | ||
496 | * @rx_ring: rx descriptor ring packet is being transacted on | 497 | * @rx_ring: rx descriptor ring packet is being transacted on |
497 | * @rx_desc: pointer to the EOP Rx descriptor | 498 | * @rx_desc: pointer to the EOP Rx descriptor |
498 | * @skb: pointer to current skb being populated | 499 | * @skb: pointer to current skb being populated |
@@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
500 | * This function checks the ring, descriptor, and packet information in | 501 | * This function checks the ring, descriptor, and packet information in |
501 | * order to populate the checksum, VLAN, protocol, and other fields within | 502 | * order to populate the checksum, VLAN, protocol, and other fields within |
502 | * the skb. | 503 | * the skb. |
503 | */ | 504 | **/ |
504 | static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, | 505 | static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, |
505 | union ixgbe_adv_rx_desc *rx_desc, | 506 | union ixgbe_adv_rx_desc *rx_desc, |
506 | struct sk_buff *skb) | 507 | struct sk_buff *skb) |
@@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
647 | } | 648 | } |
648 | } | 649 | } |
649 | 650 | ||
650 | /* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail | 651 | /** |
652 | * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail | ||
651 | * @rx_ring: rx descriptor ring packet is being transacted on | 653 | * @rx_ring: rx descriptor ring packet is being transacted on |
652 | * @skb: pointer to current skb being adjusted | 654 | * @skb: pointer to current skb being adjusted |
653 | * | 655 | * |
@@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
657 | * that allow for significant optimizations versus the standard function. | 659 | * that allow for significant optimizations versus the standard function. |
658 | * As a result we can do things like drop a frag and maintain an accurate | 660 | * As a result we can do things like drop a frag and maintain an accurate |
659 | * truesize for the skb. | 661 | * truesize for the skb. |
660 | */ | 662 | **/ |
661 | static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, | 663 | static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, |
662 | struct sk_buff *skb) | 664 | struct sk_buff *skb) |
663 | { | 665 | { |
@@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, | |||
686 | skb->tail += pull_len; | 688 | skb->tail += pull_len; |
687 | } | 689 | } |
688 | 690 | ||
689 | /* ixgbevf_cleanup_headers - Correct corrupted or empty headers | 691 | /** |
692 | * ixgbevf_cleanup_headers - Correct corrupted or empty headers | ||
690 | * @rx_ring: rx descriptor ring packet is being transacted on | 693 | * @rx_ring: rx descriptor ring packet is being transacted on |
691 | * @rx_desc: pointer to the EOP Rx descriptor | 694 | * @rx_desc: pointer to the EOP Rx descriptor |
692 | * @skb: pointer to current skb being fixed | 695 | * @skb: pointer to current skb being fixed |
@@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, | |||
702 | * it is large enough to qualify as a valid Ethernet frame. | 705 | * it is large enough to qualify as a valid Ethernet frame. |
703 | * | 706 | * |
704 | * Returns true if an error was encountered and skb was freed. | 707 | * Returns true if an error was encountered and skb was freed. |
705 | */ | 708 | **/ |
706 | static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, | 709 | static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, |
707 | union ixgbe_adv_rx_desc *rx_desc, | 710 | union ixgbe_adv_rx_desc *rx_desc, |
708 | struct sk_buff *skb) | 711 | struct sk_buff *skb) |
@@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, | |||
729 | return false; | 732 | return false; |
730 | } | 733 | } |
731 | 734 | ||
732 | /* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring | 735 | /** |
736 | * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring | ||
733 | * @rx_ring: rx descriptor ring to store buffers on | 737 | * @rx_ring: rx descriptor ring to store buffers on |
734 | * @old_buff: donor buffer to have page reused | 738 | * @old_buff: donor buffer to have page reused |
735 | * | 739 | * |
736 | * Synchronizes page for reuse by the adapter | 740 | * Synchronizes page for reuse by the adapter |
737 | */ | 741 | **/ |
738 | static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, | 742 | static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, |
739 | struct ixgbevf_rx_buffer *old_buff) | 743 | struct ixgbevf_rx_buffer *old_buff) |
740 | { | 744 | { |
@@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) | |||
764 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; | 768 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; |
765 | } | 769 | } |
766 | 770 | ||
767 | /* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff | 771 | /** |
772 | * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff | ||
768 | * @rx_ring: rx descriptor ring to transact packets on | 773 | * @rx_ring: rx descriptor ring to transact packets on |
769 | * @rx_buffer: buffer containing page to add | 774 | * @rx_buffer: buffer containing page to add |
770 | * @rx_desc: descriptor containing length of buffer written by hardware | 775 | * @rx_desc: descriptor containing length of buffer written by hardware |
@@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) | |||
777 | * | 782 | * |
778 | * The function will then update the page offset if necessary and return | 783 | * The function will then update the page offset if necessary and return |
779 | * true if the buffer can be reused by the adapter. | 784 | * true if the buffer can be reused by the adapter. |
780 | */ | 785 | **/ |
781 | static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, | 786 | static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, |
782 | struct ixgbevf_rx_buffer *rx_buffer, | 787 | struct ixgbevf_rx_buffer *rx_buffer, |
783 | union ixgbe_adv_rx_desc *rx_desc, | 788 | union ixgbe_adv_rx_desc *rx_desc, |
@@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, | |||
958 | * source pruning. | 963 | * source pruning. |
959 | */ | 964 | */ |
960 | if ((skb->pkt_type == PACKET_BROADCAST || | 965 | if ((skb->pkt_type == PACKET_BROADCAST || |
961 | skb->pkt_type == PACKET_MULTICAST) && | 966 | skb->pkt_type == PACKET_MULTICAST) && |
962 | ether_addr_equal(rx_ring->netdev->dev_addr, | 967 | ether_addr_equal(rx_ring->netdev->dev_addr, |
963 | eth_hdr(skb)->h_source)) { | 968 | eth_hdr(skb)->h_source)) { |
964 | dev_kfree_skb_irq(skb); | 969 | dev_kfree_skb_irq(skb); |
@@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) | |||
1016 | #endif | 1021 | #endif |
1017 | 1022 | ||
1018 | /* attempt to distribute budget to each queue fairly, but don't allow | 1023 | /* attempt to distribute budget to each queue fairly, but don't allow |
1019 | * the budget to go below 1 because we'll exit polling */ | 1024 | * the budget to go below 1 because we'll exit polling |
1025 | */ | ||
1020 | if (q_vector->rx.count > 1) | 1026 | if (q_vector->rx.count > 1) |
1021 | per_ring_budget = max(budget/q_vector->rx.count, 1); | 1027 | per_ring_budget = max(budget/q_vector->rx.count, 1); |
1022 | else | 1028 | else |
@@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) | |||
1049 | /** | 1055 | /** |
1050 | * ixgbevf_write_eitr - write VTEITR register in hardware specific way | 1056 | * ixgbevf_write_eitr - write VTEITR register in hardware specific way |
1051 | * @q_vector: structure containing interrupt and ring information | 1057 | * @q_vector: structure containing interrupt and ring information |
1052 | */ | 1058 | **/ |
1053 | void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) | 1059 | void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) |
1054 | { | 1060 | { |
1055 | struct ixgbevf_adapter *adapter = q_vector->adapter; | 1061 | struct ixgbevf_adapter *adapter = q_vector->adapter; |
@@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) | |||
1057 | int v_idx = q_vector->v_idx; | 1063 | int v_idx = q_vector->v_idx; |
1058 | u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; | 1064 | u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; |
1059 | 1065 | ||
1060 | /* | 1066 | /* set the WDIS bit to not clear the timer bits and cause an |
1061 | * set the WDIS bit to not clear the timer bits and cause an | ||
1062 | * immediate assertion of the interrupt | 1067 | * immediate assertion of the interrupt |
1063 | */ | 1068 | */ |
1064 | itr_reg |= IXGBE_EITR_CNT_WDIS; | 1069 | itr_reg |= IXGBE_EITR_CNT_WDIS; |
@@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) | |||
1115 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1120 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
1116 | adapter->eims_enable_mask = 0; | 1121 | adapter->eims_enable_mask = 0; |
1117 | 1122 | ||
1118 | /* | 1123 | /* Populate the IVAR table and set the ITR values to the |
1119 | * Populate the IVAR table and set the ITR values to the | ||
1120 | * corresponding register. | 1124 | * corresponding register. |
1121 | */ | 1125 | */ |
1122 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { | 1126 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { |
1123 | struct ixgbevf_ring *ring; | 1127 | struct ixgbevf_ring *ring; |
1128 | |||
1124 | q_vector = adapter->q_vector[v_idx]; | 1129 | q_vector = adapter->q_vector[v_idx]; |
1125 | 1130 | ||
1126 | ixgbevf_for_each_ring(ring, q_vector->rx) | 1131 | ixgbevf_for_each_ring(ring, q_vector->rx) |
@@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) | |||
1130 | ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); | 1135 | ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); |
1131 | 1136 | ||
1132 | if (q_vector->tx.ring && !q_vector->rx.ring) { | 1137 | if (q_vector->tx.ring && !q_vector->rx.ring) { |
1133 | /* tx only vector */ | 1138 | /* Tx only vector */ |
1134 | if (adapter->tx_itr_setting == 1) | 1139 | if (adapter->tx_itr_setting == 1) |
1135 | q_vector->itr = IXGBE_10K_ITR; | 1140 | q_vector->itr = IXGBE_10K_ITR; |
1136 | else | 1141 | else |
1137 | q_vector->itr = adapter->tx_itr_setting; | 1142 | q_vector->itr = adapter->tx_itr_setting; |
1138 | } else { | 1143 | } else { |
1139 | /* rx or rx/tx vector */ | 1144 | /* Rx or Rx/Tx vector */ |
1140 | if (adapter->rx_itr_setting == 1) | 1145 | if (adapter->rx_itr_setting == 1) |
1141 | q_vector->itr = IXGBE_20K_ITR; | 1146 | q_vector->itr = IXGBE_20K_ITR; |
1142 | else | 1147 | else |
@@ -1167,13 +1172,13 @@ enum latency_range { | |||
1167 | * @q_vector: structure containing interrupt and ring information | 1172 | * @q_vector: structure containing interrupt and ring information |
1168 | * @ring_container: structure containing ring performance data | 1173 | * @ring_container: structure containing ring performance data |
1169 | * | 1174 | * |
1170 | * Stores a new ITR value based on packets and byte | 1175 | * Stores a new ITR value based on packets and byte |
1171 | * counts during the last interrupt. The advantage of per interrupt | 1176 | * counts during the last interrupt. The advantage of per interrupt |
1172 | * computation is faster updates and more accurate ITR for the current | 1177 | * computation is faster updates and more accurate ITR for the current |
1173 | * traffic pattern. Constants in this function were computed | 1178 | * traffic pattern. Constants in this function were computed |
1174 | * based on theoretical maximum wire speed and thresholds were set based | 1179 | * based on theoretical maximum wire speed and thresholds were set based |
1175 | * on testing data as well as attempting to minimize response time | 1180 | * on testing data as well as attempting to minimize response time |
1176 | * while increasing bulk throughput. | 1181 | * while increasing bulk throughput. |
1177 | **/ | 1182 | **/ |
1178 | static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, | 1183 | static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, |
1179 | struct ixgbevf_ring_container *ring_container) | 1184 | struct ixgbevf_ring_container *ring_container) |
@@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, | |||
1187 | if (packets == 0) | 1192 | if (packets == 0) |
1188 | return; | 1193 | return; |
1189 | 1194 | ||
1190 | /* simple throttlerate management | 1195 | /* simple throttle rate management |
1191 | * 0-20MB/s lowest (100000 ints/s) | 1196 | * 0-20MB/s lowest (100000 ints/s) |
1192 | * 20-100MB/s low (20000 ints/s) | 1197 | * 20-100MB/s low (20000 ints/s) |
1193 | * 100-1249MB/s bulk (8000 ints/s) | 1198 | * 100-1249MB/s bulk (8000 ints/s) |
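The throttle table in this comment maps observed throughput to a target interrupt rate. As an illustration only, a compilable sketch using the thresholds quoted above; the function name and sample inputs are invented:

    #include <stdio.h>

    /* Map throughput to a target interrupt rate using the ranges from
     * the comment above.  A standalone sketch, not ixgbevf_update_itr().
     */
    static unsigned int target_ints_per_sec(unsigned int mbytes_per_sec)
    {
        if (mbytes_per_sec < 20)
            return 100000;   /* lowest latency range */
        if (mbytes_per_sec < 100)
            return 20000;    /* low latency range */
        return 8000;         /* bulk range (100-1249 MB/s) */
    }

    int main(void)
    {
        printf("%u\n", target_ints_per_sec(5));    /* 100000 */
        printf("%u\n", target_ints_per_sec(50));   /* 20000 */
        printf("%u\n", target_ints_per_sec(800));  /* 8000 */
        return 0;
    }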
@@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) | |||
1330 | 1335 | ||
1331 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1336 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
1332 | 1337 | ||
1333 | /* | 1338 | /* The ideal configuration... |
1334 | * The ideal configuration... | ||
1335 | * We have enough vectors to map one per queue. | 1339 | * We have enough vectors to map one per queue. |
1336 | */ | 1340 | */ |
1337 | if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { | 1341 | if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { |
@@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) | |||
1343 | goto out; | 1347 | goto out; |
1344 | } | 1348 | } |
1345 | 1349 | ||
1346 | /* | 1350 | /* If we don't have enough vectors for a 1-to-1 |
1347 | * If we don't have enough vectors for a 1-to-1 | ||
1348 | * mapping, we'll have to group them so there are | 1351 | * mapping, we'll have to group them so there are |
1349 | * multiple queues per vector. | 1352 | * multiple queues per vector. |
1350 | */ | 1353 | */ |
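These two hunks concern mapping Rx/Tx rings onto MSI-X vectors: one ring per vector when there are enough vectors, grouped otherwise. The sketch below shows only the grouping arithmetic (ceil-dividing the remaining rings over the remaining vectors); it illustrates the idea and is not the driver's mapping routine:

    #include <stdio.h>

    /* Spread rxr Rx rings and txr Tx rings over q_vectors interrupt
     * vectors so any remainder is distributed as evenly as possible.
     */
    static void map_rings(int q_vectors, int rxr, int txr)
    {
        int v;

        for (v = 0; v < q_vectors; v++) {
            /* ceil-divide the remaining rings over the remaining vectors */
            int rx_share = (rxr + (q_vectors - v) - 1) / (q_vectors - v);
            int tx_share = (txr + (q_vectors - v) - 1) / (q_vectors - v);

            printf("vector %d: %d rx ring(s), %d tx ring(s)\n",
                   v, rx_share, tx_share);
            rxr -= rx_share;
            txr -= tx_share;
        }
    }

    int main(void)
    {
        map_rings(2, 3, 3);  /* short on vectors: rings get grouped */
        return 0;
    }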
@@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) | |||
1406 | q_vector->name, q_vector); | 1409 | q_vector->name, q_vector); |
1407 | if (err) { | 1410 | if (err) { |
1408 | hw_dbg(&adapter->hw, | 1411 | hw_dbg(&adapter->hw, |
1409 | "request_irq failed for MSIX interrupt " | 1412 | "request_irq failed for MSIX interrupt Error: %d\n", |
1410 | "Error: %d\n", err); | 1413 | err); |
1411 | goto free_queue_irqs; | 1414 | goto free_queue_irqs; |
1412 | } | 1415 | } |
1413 | } | 1416 | } |
@@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) | |||
1415 | err = request_irq(adapter->msix_entries[vector].vector, | 1418 | err = request_irq(adapter->msix_entries[vector].vector, |
1416 | &ixgbevf_msix_other, 0, netdev->name, adapter); | 1419 | &ixgbevf_msix_other, 0, netdev->name, adapter); |
1417 | if (err) { | 1420 | if (err) { |
1418 | hw_dbg(&adapter->hw, | 1421 | hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", |
1419 | "request_irq for msix_other failed: %d\n", err); | 1422 | err); |
1420 | goto free_queue_irqs; | 1423 | goto free_queue_irqs; |
1421 | } | 1424 | } |
1422 | 1425 | ||
@@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) | |||
1448 | 1451 | ||
1449 | for (i = 0; i < q_vectors; i++) { | 1452 | for (i = 0; i < q_vectors; i++) { |
1450 | struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; | 1453 | struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; |
1454 | |||
1451 | q_vector->rx.ring = NULL; | 1455 | q_vector->rx.ring = NULL; |
1452 | q_vector->tx.ring = NULL; | 1456 | q_vector->tx.ring = NULL; |
1453 | q_vector->rx.count = 0; | 1457 | q_vector->rx.count = 0; |
@@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) | |||
1469 | err = ixgbevf_request_msix_irqs(adapter); | 1473 | err = ixgbevf_request_msix_irqs(adapter); |
1470 | 1474 | ||
1471 | if (err) | 1475 | if (err) |
1472 | hw_dbg(&adapter->hw, | 1476 | hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); |
1473 | "request_irq failed, Error %d\n", err); | ||
1474 | 1477 | ||
1475 | return err; | 1478 | return err; |
1476 | } | 1479 | } |
@@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, | |||
1659 | /* write value back with RXDCTL.ENABLE bit cleared */ | 1662 | /* write value back with RXDCTL.ENABLE bit cleared */ |
1660 | IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); | 1663 | IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); |
1661 | 1664 | ||
1662 | /* the hardware may take up to 100us to really disable the rx queue */ | 1665 | /* the hardware may take up to 100us to really disable the Rx queue */ |
1663 | do { | 1666 | do { |
1664 | udelay(10); | 1667 | udelay(10); |
1665 | rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); | 1668 | rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); |
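The loop above polls RXDCTL until the enable bit really drops, in 10 microsecond steps within the 100 microsecond worst case. A userspace sketch of the same poll-until-clear pattern, with hw_queue_enabled() standing in for the register read:

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for re-reading RXDCTL; pretend the queue stops after a
     * couple of reads.
     */
    static int hw_queue_enabled(void)
    {
        static int reads_left = 3;

        return --reads_left > 0;
    }

    int main(void)
    {
        int wait_loop = 10;          /* 10 * 10us covers the 100us worst case */

        while (wait_loop-- && hw_queue_enabled())
            usleep(10);              /* udelay(10) in the driver */

        printf(wait_loop >= 0 ? "queue disabled\n"
                              : "RXDCTL.ENABLE not cleared in time\n");
        return 0;
    }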
@@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) | |||
1786 | ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); | 1789 | ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); |
1787 | 1790 | ||
1788 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 1791 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1789 | * the Base and Length of the Rx Descriptor Ring */ | 1792 | * the Base and Length of the Rx Descriptor Ring |
1793 | */ | ||
1790 | for (i = 0; i < adapter->num_rx_queues; i++) | 1794 | for (i = 0; i < adapter->num_rx_queues; i++) |
1791 | ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); | 1795 | ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); |
1792 | } | 1796 | } |
@@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) | |||
1858 | 1862 | ||
1859 | if (!netdev_uc_empty(netdev)) { | 1863 | if (!netdev_uc_empty(netdev)) { |
1860 | struct netdev_hw_addr *ha; | 1864 | struct netdev_hw_addr *ha; |
1865 | |||
1861 | netdev_for_each_uc_addr(ha, netdev) { | 1866 | netdev_for_each_uc_addr(ha, netdev) { |
1862 | hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); | 1867 | hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); |
1863 | udelay(200); | 1868 | udelay(200); |
1864 | } | 1869 | } |
1865 | } else { | 1870 | } else { |
1866 | /* | 1871 | /* If the list is empty then send message to PF driver to |
1867 | * If the list is empty then send message to PF driver to | 1872 | * clear all MAC VLANs on this VF. |
1868 | * clear all macvlans on this VF. | ||
1869 | */ | 1873 | */ |
1870 | hw->mac.ops.set_uc_addr(hw, 0, NULL); | 1874 | hw->mac.ops.set_uc_addr(hw, 0, NULL); |
1871 | } | 1875 | } |
@@ -2184,7 +2188,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) | |||
2184 | if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) | 2188 | if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) |
2185 | return; /* do nothing if already down */ | 2189 | return; /* do nothing if already down */ |
2186 | 2190 | ||
2187 | /* disable all enabled rx queues */ | 2191 | /* disable all enabled Rx queues */ |
2188 | for (i = 0; i < adapter->num_rx_queues; i++) | 2192 | for (i = 0; i < adapter->num_rx_queues; i++) |
2189 | ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); | 2193 | ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); |
2190 | 2194 | ||
@@ -2406,8 +2410,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) | |||
2406 | int err = 0; | 2410 | int err = 0; |
2407 | int vector, v_budget; | 2411 | int vector, v_budget; |
2408 | 2412 | ||
2409 | /* | 2413 | /* It's easy to be greedy for MSI-X vectors, but it really |
2410 | * It's easy to be greedy for MSI-X vectors, but it really | ||
2411 | * doesn't do us much good if we have a lot more vectors | 2414 | * doesn't do us much good if we have a lot more vectors |
2412 | * than CPU's. So let's be conservative and only ask for | 2415 | * than CPU's. So let's be conservative and only ask for |
2413 | * (roughly) the same number of vectors as there are CPU's. | 2416 | * (roughly) the same number of vectors as there are CPU's. |
@@ -2418,7 +2421,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) | |||
2418 | v_budget += NON_Q_VECTORS; | 2421 | v_budget += NON_Q_VECTORS; |
2419 | 2422 | ||
2420 | /* A failure in MSI-X entry allocation isn't fatal, but it does | 2423 | /* A failure in MSI-X entry allocation isn't fatal, but it does |
2421 | * mean we disable MSI-X capabilities of the adapter. */ | 2424 | * mean we disable MSI-X capabilities of the adapter. |
2425 | */ | ||
2422 | adapter->msix_entries = kcalloc(v_budget, | 2426 | adapter->msix_entries = kcalloc(v_budget, |
2423 | sizeof(struct msix_entry), GFP_KERNEL); | 2427 | sizeof(struct msix_entry), GFP_KERNEL); |
2424 | if (!adapter->msix_entries) { | 2428 | if (!adapter->msix_entries) { |
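The comment explains the vector budget: roughly one MSI-X vector per queue, capped by the CPU count, plus the non-queue vector. A hedged sketch of that calculation; the helper name is invented, and the single non-queue vector used in the example is an assumption, not taken from this hunk:

    #include <stdio.h>

    /* Conservative MSI-X budget: one vector per queue, but no more than
     * there are CPUs, plus the non-queue (mailbox/other) vector(s).
     */
    static int msix_budget(int num_queues, int num_online_cpus,
                           int non_q_vectors)
    {
        int v = num_queues < num_online_cpus ? num_queues : num_online_cpus;

        return v + non_q_vectors;
    }

    int main(void)
    {
        /* 8 queues on a 4-CPU box, assuming one non-queue vector -> 5 */
        printf("%d\n", msix_budget(8, 4, 1));
        return 0;
    }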
@@ -2544,8 +2548,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) | |||
2544 | 2548 | ||
2545 | err = ixgbevf_alloc_q_vectors(adapter); | 2549 | err = ixgbevf_alloc_q_vectors(adapter); |
2546 | if (err) { | 2550 | if (err) { |
2547 | hw_dbg(&adapter->hw, "Unable to allocate memory for queue " | 2551 | hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); |
2548 | "vectors\n"); | ||
2549 | goto err_alloc_q_vectors; | 2552 | goto err_alloc_q_vectors; |
2550 | } | 2553 | } |
2551 | 2554 | ||
@@ -2555,8 +2558,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) | |||
2555 | goto err_alloc_queues; | 2558 | goto err_alloc_queues; |
2556 | } | 2559 | } |
2557 | 2560 | ||
2558 | hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " | 2561 | hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", |
2559 | "Tx Queue count = %u\n", | ||
2560 | (adapter->num_rx_queues > 1) ? "Enabled" : | 2562 | (adapter->num_rx_queues > 1) ? "Enabled" : |
2561 | "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); | 2563 | "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); |
2562 | 2564 | ||
@@ -2600,7 +2602,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) | |||
2600 | 2602 | ||
2601 | /** | 2603 | /** |
2602 | * ixgbevf_sw_init - Initialize general software structures | 2604 | * ixgbevf_sw_init - Initialize general software structures |
2603 | * (struct ixgbevf_adapter) | ||
2604 | * @adapter: board private structure to initialize | 2605 | * @adapter: board private structure to initialize |
2605 | * | 2606 | * |
2606 | * ixgbevf_sw_init initializes the Adapter private data structure. | 2607 | * ixgbevf_sw_init initializes the Adapter private data structure. |
@@ -2615,7 +2616,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) | |||
2615 | int err; | 2616 | int err; |
2616 | 2617 | ||
2617 | /* PCI config space info */ | 2618 | /* PCI config space info */ |
2618 | |||
2619 | hw->vendor_id = pdev->vendor; | 2619 | hw->vendor_id = pdev->vendor; |
2620 | hw->device_id = pdev->device; | 2620 | hw->device_id = pdev->device; |
2621 | hw->revision_id = pdev->revision; | 2621 | hw->revision_id = pdev->revision; |
@@ -2686,8 +2686,8 @@ out: | |||
2686 | { \ | 2686 | { \ |
2687 | u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ | 2687 | u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ |
2688 | u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ | 2688 | u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ |
2689 | u64 current_counter = (current_counter_msb << 32) | \ | 2689 | u64 current_counter = (current_counter_msb << 32) | \ |
2690 | current_counter_lsb; \ | 2690 | current_counter_lsb; \ |
2691 | if (current_counter < last_counter) \ | 2691 | if (current_counter < last_counter) \ |
2692 | counter += 0x1000000000LL; \ | 2692 | counter += 0x1000000000LL; \ |
2693 | last_counter = current_counter; \ | 2693 | last_counter = current_counter; \ |
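This macro stitches a 36-bit hardware counter out of two registers and handles wrap-around by adding 2^36 whenever the new reading falls below the previous one. A standalone sketch of that wrap handling; returning wrap_base plus the current reading is my way of consuming the result, the macro folds it together differently:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t wrap_base, last;

    /* lsb carries bits 31:0 of the hardware counter, msb carries bits
     * 35:32.  If the combined reading dropped, the counter wrapped, so
     * add one full 36-bit span to the accumulated base.
     */
    static uint64_t update(uint64_t lsb, uint64_t msb)
    {
        uint64_t current = (msb << 32) | lsb;

        if (current < last)
            wrap_base += 0x1000000000ULL;    /* 2^36, one full wrap */
        last = current;
        return wrap_base + current;          /* running 64-bit total */
    }

    int main(void)
    {
        update(0xFFFFFFF0, 0xF);                       /* near the top */
        printf("0x%llx\n",
               (unsigned long long)update(0x10, 0x0)); /* after a wrap */
        return 0;
    }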
@@ -2758,14 +2758,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) | |||
2758 | ixgbevf_reinit_locked(adapter); | 2758 | ixgbevf_reinit_locked(adapter); |
2759 | } | 2759 | } |
2760 | 2760 | ||
2761 | /* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts | 2761 | /** |
2762 | * @adapter - pointer to the device adapter structure | 2762 | * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts |
2763 | * @adapter: pointer to the device adapter structure | ||
2763 | * | 2764 | * |
2764 | * This function serves two purposes. First it strobes the interrupt lines | 2765 | * This function serves two purposes. First it strobes the interrupt lines |
2765 | * in order to make certain interrupts are occurring. Secondly it sets the | 2766 | * in order to make certain interrupts are occurring. Secondly it sets the |
2766 | * bits needed to check for TX hangs. As a result we should immediately | 2767 | * bits needed to check for TX hangs. As a result we should immediately |
2767 | * determine if a hang has occurred. | 2768 | * determine if a hang has occurred. |
2768 | */ | 2769 | **/ |
2769 | static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) | 2770 | static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) |
2770 | { | 2771 | { |
2771 | struct ixgbe_hw *hw = &adapter->hw; | 2772 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -2783,7 +2784,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) | |||
2783 | set_check_for_tx_hang(adapter->tx_ring[i]); | 2784 | set_check_for_tx_hang(adapter->tx_ring[i]); |
2784 | } | 2785 | } |
2785 | 2786 | ||
2786 | /* get one bit for every active tx/rx interrupt vector */ | 2787 | /* get one bit for every active Tx/Rx interrupt vector */ |
2787 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 2788 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { |
2788 | struct ixgbevf_q_vector *qv = adapter->q_vector[i]; | 2789 | struct ixgbevf_q_vector *qv = adapter->q_vector[i]; |
2789 | 2790 | ||
@@ -2797,7 +2798,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) | |||
2797 | 2798 | ||
2798 | /** | 2799 | /** |
2799 | * ixgbevf_watchdog_update_link - update the link status | 2800 | * ixgbevf_watchdog_update_link - update the link status |
2800 | * @adapter - pointer to the device adapter structure | 2801 | * @adapter: pointer to the device adapter structure |
2801 | **/ | 2802 | **/ |
2802 | static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) | 2803 | static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) |
2803 | { | 2804 | { |
@@ -2825,7 +2826,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) | |||
2825 | /** | 2826 | /** |
2826 | * ixgbevf_watchdog_link_is_up - update netif_carrier status and | 2827 | * ixgbevf_watchdog_link_is_up - update netif_carrier status and |
2827 | * print link up message | 2828 | * print link up message |
2828 | * @adapter - pointer to the device adapter structure | 2829 | * @adapter: pointer to the device adapter structure |
2829 | **/ | 2830 | **/ |
2830 | static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) | 2831 | static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) |
2831 | { | 2832 | { |
@@ -2850,7 +2851,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) | |||
2850 | /** | 2851 | /** |
2851 | * ixgbevf_watchdog_link_is_down - update netif_carrier status and | 2852 | * ixgbevf_watchdog_link_is_down - update netif_carrier status and |
2852 | * print link down message | 2853 | * print link down message |
2853 | * @adapter - pointer to the adapter structure | 2854 | * @adapter: pointer to the adapter structure |
2854 | **/ | 2855 | **/ |
2855 | static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) | 2856 | static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) |
2856 | { | 2857 | { |
@@ -2956,7 +2957,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) | |||
2956 | 2957 | ||
2957 | /** | 2958 | /** |
2958 | * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) | 2959 | * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) |
2959 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | 2960 | * @tx_ring: Tx descriptor ring (for a specific queue) to setup |
2960 | * | 2961 | * |
2961 | * Return 0 on success, negative on failure | 2962 | * Return 0 on success, negative on failure |
2962 | **/ | 2963 | **/ |
@@ -2983,8 +2984,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) | |||
2983 | err: | 2984 | err: |
2984 | vfree(tx_ring->tx_buffer_info); | 2985 | vfree(tx_ring->tx_buffer_info); |
2985 | tx_ring->tx_buffer_info = NULL; | 2986 | tx_ring->tx_buffer_info = NULL; |
2986 | hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " | 2987 | hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); |
2987 | "descriptor ring\n"); | ||
2988 | return -ENOMEM; | 2988 | return -ENOMEM; |
2989 | } | 2989 | } |
2990 | 2990 | ||
@@ -3006,8 +3006,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) | |||
3006 | err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); | 3006 | err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); |
3007 | if (!err) | 3007 | if (!err) |
3008 | continue; | 3008 | continue; |
3009 | hw_dbg(&adapter->hw, | 3009 | hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); |
3010 | "Allocation for Tx Queue %u failed\n", i); | ||
3011 | break; | 3010 | break; |
3012 | } | 3011 | } |
3013 | 3012 | ||
@@ -3016,7 +3015,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) | |||
3016 | 3015 | ||
3017 | /** | 3016 | /** |
3018 | * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) | 3017 | * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) |
3019 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 3018 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup |
3020 | * | 3019 | * |
3021 | * Returns 0 on success, negative on failure | 3020 | * Returns 0 on success, negative on failure |
3022 | **/ | 3021 | **/ |
@@ -3065,8 +3064,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) | |||
3065 | err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); | 3064 | err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); |
3066 | if (!err) | 3065 | if (!err) |
3067 | continue; | 3066 | continue; |
3068 | hw_dbg(&adapter->hw, | 3067 | hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); |
3069 | "Allocation for Rx Queue %u failed\n", i); | ||
3070 | break; | 3068 | break; |
3071 | } | 3069 | } |
3072 | return err; | 3070 | return err; |
@@ -3136,11 +3134,11 @@ static int ixgbevf_open(struct net_device *netdev) | |||
3136 | if (hw->adapter_stopped) { | 3134 | if (hw->adapter_stopped) { |
3137 | ixgbevf_reset(adapter); | 3135 | ixgbevf_reset(adapter); |
3138 | /* if adapter is still stopped then PF isn't up and | 3136 | /* if adapter is still stopped then PF isn't up and |
3139 | * the vf can't start. */ | 3137 | * the VF can't start. |
3138 | */ | ||
3140 | if (hw->adapter_stopped) { | 3139 | if (hw->adapter_stopped) { |
3141 | err = IXGBE_ERR_MBX; | 3140 | err = IXGBE_ERR_MBX; |
3142 | pr_err("Unable to start - perhaps the PF Driver isn't " | 3141 | pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); |
3143 | "up yet\n"); | ||
3144 | goto err_setup_reset; | 3142 | goto err_setup_reset; |
3145 | } | 3143 | } |
3146 | } | 3144 | } |
@@ -3163,8 +3161,7 @@ static int ixgbevf_open(struct net_device *netdev) | |||
3163 | 3161 | ||
3164 | ixgbevf_configure(adapter); | 3162 | ixgbevf_configure(adapter); |
3165 | 3163 | ||
3166 | /* | 3164 | /* Map the Tx/Rx rings to the vectors we were allotted. |
3167 | * Map the Tx/Rx rings to the vectors we were allotted. | ||
3168 | * if request_irq will be called in this function map_rings | 3165 | * if request_irq will be called in this function map_rings |
3169 | * must be called *before* up_complete | 3166 | * must be called *before* up_complete |
3170 | */ | 3167 | */ |
@@ -3288,6 +3285,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, | |||
3288 | 3285 | ||
3289 | if (first->protocol == htons(ETH_P_IP)) { | 3286 | if (first->protocol == htons(ETH_P_IP)) { |
3290 | struct iphdr *iph = ip_hdr(skb); | 3287 | struct iphdr *iph = ip_hdr(skb); |
3288 | |||
3291 | iph->tot_len = 0; | 3289 | iph->tot_len = 0; |
3292 | iph->check = 0; | 3290 | iph->check = 0; |
3293 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 3291 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
@@ -3313,7 +3311,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, | |||
3313 | *hdr_len += l4len; | 3311 | *hdr_len += l4len; |
3314 | *hdr_len = skb_transport_offset(skb) + l4len; | 3312 | *hdr_len = skb_transport_offset(skb) + l4len; |
3315 | 3313 | ||
3316 | /* update gso size and bytecount with header size */ | 3314 | /* update GSO size and bytecount with header size */ |
3317 | first->gso_segs = skb_shinfo(skb)->gso_segs; | 3315 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
3318 | first->bytecount += (first->gso_segs - 1) * *hdr_len; | 3316 | first->bytecount += (first->gso_segs - 1) * *hdr_len; |
3319 | 3317 | ||
@@ -3343,6 +3341,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, | |||
3343 | 3341 | ||
3344 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3342 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3345 | u8 l4_hdr = 0; | 3343 | u8 l4_hdr = 0; |
3344 | |||
3346 | switch (first->protocol) { | 3345 | switch (first->protocol) { |
3347 | case htons(ETH_P_IP): | 3346 | case htons(ETH_P_IP): |
3348 | vlan_macip_lens |= skb_network_header_len(skb); | 3347 | vlan_macip_lens |= skb_network_header_len(skb); |
@@ -3356,8 +3355,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, | |||
3356 | default: | 3355 | default: |
3357 | if (unlikely(net_ratelimit())) { | 3356 | if (unlikely(net_ratelimit())) { |
3358 | dev_warn(tx_ring->dev, | 3357 | dev_warn(tx_ring->dev, |
3359 | "partial checksum but proto=%x!\n", | 3358 | "partial checksum but proto=%x!\n", |
3360 | first->protocol); | 3359 | first->protocol); |
3361 | } | 3360 | } |
3362 | break; | 3361 | break; |
3363 | } | 3362 | } |
@@ -3380,8 +3379,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, | |||
3380 | default: | 3379 | default: |
3381 | if (unlikely(net_ratelimit())) { | 3380 | if (unlikely(net_ratelimit())) { |
3382 | dev_warn(tx_ring->dev, | 3381 | dev_warn(tx_ring->dev, |
3383 | "partial checksum but l4 proto=%x!\n", | 3382 | "partial checksum but l4 proto=%x!\n", |
3384 | l4_hdr); | 3383 | l4_hdr); |
3385 | } | 3384 | } |
3386 | break; | 3385 | break; |
3387 | } | 3386 | } |
@@ -3405,7 +3404,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) | |||
3405 | IXGBE_ADVTXD_DCMD_IFCS | | 3404 | IXGBE_ADVTXD_DCMD_IFCS | |
3406 | IXGBE_ADVTXD_DCMD_DEXT); | 3405 | IXGBE_ADVTXD_DCMD_DEXT); |
3407 | 3406 | ||
3408 | /* set HW vlan bit if vlan is present */ | 3407 | /* set HW VLAN bit if VLAN is present */ |
3409 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) | 3408 | if (tx_flags & IXGBE_TX_FLAGS_VLAN) |
3410 | cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); | 3409 | cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); |
3411 | 3410 | ||
@@ -3572,11 +3571,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) | |||
3572 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | 3571 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); |
3573 | /* Herbert's original patch had: | 3572 | /* Herbert's original patch had: |
3574 | * smp_mb__after_netif_stop_queue(); | 3573 | * smp_mb__after_netif_stop_queue(); |
3575 | * but since that doesn't exist yet, just open code it. */ | 3574 | * but since that doesn't exist yet, just open code it. |
3575 | */ | ||
3576 | smp_mb(); | 3576 | smp_mb(); |
3577 | 3577 | ||
3578 | /* We need to check again in a case another CPU has just | 3578 | /* We need to check again in a case another CPU has just |
3579 | * made room available. */ | 3579 | * made room available. |
3580 | */ | ||
3580 | if (likely(ixgbevf_desc_unused(tx_ring) < size)) | 3581 | if (likely(ixgbevf_desc_unused(tx_ring) < size)) |
3581 | return -EBUSY; | 3582 | return -EBUSY; |
3582 | 3583 | ||
@@ -3615,8 +3616,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3615 | 3616 | ||
3616 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | 3617 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
3617 | 3618 | ||
3618 | /* | 3619 | /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, |
3619 | * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, | ||
3620 | * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, | 3620 | * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, |
3621 | * + 2 desc gap to keep tail from touching head, | 3621 | * + 2 desc gap to keep tail from touching head, |
3622 | * + 1 desc for context descriptor, | 3622 | * + 1 desc for context descriptor, |
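The comment lists the worst-case descriptor budget for one frame. A compilable sketch of that estimate; PAGE_SZ and the per-descriptor data cap are assumed values here, not taken from this file:

    #include <stdio.h>

    #define MAX_DATA_PER_TXD  (1u << 14)   /* assumed per-descriptor payload cap */
    #define PAGE_SZ           4096u        /* assumed page size */

    /* Worst case per frame, following the recipe above: one descriptor
     * per PAGE_SZ/MAX_DATA_PER_TXD chunk of each fragment, plus the
     * linear head, a 2-descriptor gap, and one context descriptor.
     */
    static unsigned int descs_needed(unsigned int nr_frags, unsigned int headlen)
    {
        unsigned int per_frag = (PAGE_SZ + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
        unsigned int head     = (headlen + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;

        return nr_frags * per_frag + head + 2 /* gap */ + 1 /* context */;
    }

    int main(void)
    {
        printf("%u\n", descs_needed(3, 256));  /* 3 frags + small head -> 7 */
        return 0;
    }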
@@ -3794,8 +3794,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) | |||
3794 | u32 err; | 3794 | u32 err; |
3795 | 3795 | ||
3796 | pci_restore_state(pdev); | 3796 | pci_restore_state(pdev); |
3797 | /* | 3797 | /* pci_restore_state clears dev->state_saved so call |
3798 | * pci_restore_state clears dev->state_saved so call | ||
3799 | * pci_save_state to restore it. | 3798 | * pci_save_state to restore it. |
3800 | */ | 3799 | */ |
3801 | pci_save_state(pdev); | 3800 | pci_save_state(pdev); |
@@ -3930,8 +3929,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3930 | } else { | 3929 | } else { |
3931 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | 3930 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
3932 | if (err) { | 3931 | if (err) { |
3933 | dev_err(&pdev->dev, "No usable DMA " | 3932 | dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); |
3934 | "configuration, aborting\n"); | ||
3935 | goto err_dma; | 3933 | goto err_dma; |
3936 | } | 3934 | } |
3937 | pci_using_dac = 0; | 3935 | pci_using_dac = 0; |
@@ -3962,8 +3960,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3962 | hw->back = adapter; | 3960 | hw->back = adapter; |
3963 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); | 3961 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
3964 | 3962 | ||
3965 | /* | 3963 | /* call save state here in standalone driver because it relies on |
3966 | * call save state here in standalone driver because it relies on | ||
3967 | * adapter struct to exist, and needs to call netdev_priv | 3964 | * adapter struct to exist, and needs to call netdev_priv |
3968 | */ | 3965 | */ |
3969 | pci_save_state(pdev); | 3966 | pci_save_state(pdev); |
@@ -3978,7 +3975,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3978 | 3975 | ||
3979 | ixgbevf_assign_netdev_ops(netdev); | 3976 | ixgbevf_assign_netdev_ops(netdev); |
3980 | 3977 | ||
3981 | /* Setup hw api */ | 3978 | /* Setup HW API */ |
3982 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); | 3979 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); |
3983 | hw->mac.type = ii->mac; | 3980 | hw->mac.type = ii->mac; |
3984 | 3981 | ||
@@ -3998,11 +3995,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3998 | } | 3995 | } |
3999 | 3996 | ||
4000 | netdev->hw_features = NETIF_F_SG | | 3997 | netdev->hw_features = NETIF_F_SG | |
4001 | NETIF_F_IP_CSUM | | 3998 | NETIF_F_IP_CSUM | |
4002 | NETIF_F_IPV6_CSUM | | 3999 | NETIF_F_IPV6_CSUM | |
4003 | NETIF_F_TSO | | 4000 | NETIF_F_TSO | |
4004 | NETIF_F_TSO6 | | 4001 | NETIF_F_TSO6 | |
4005 | NETIF_F_RXCSUM; | 4002 | NETIF_F_RXCSUM; |
4006 | 4003 | ||
4007 | netdev->features = netdev->hw_features | | 4004 | netdev->features = netdev->hw_features | |
4008 | NETIF_F_HW_VLAN_CTAG_TX | | 4005 | NETIF_F_HW_VLAN_CTAG_TX | |
@@ -4131,7 +4128,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) | |||
4131 | * | 4128 | * |
4132 | * This function is called after a PCI bus error affecting | 4129 | * This function is called after a PCI bus error affecting |
4133 | * this device has been detected. | 4130 | * this device has been detected. |
4134 | */ | 4131 | **/ |
4135 | static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, | 4132 | static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, |
4136 | pci_channel_state_t state) | 4133 | pci_channel_state_t state) |
4137 | { | 4134 | { |
@@ -4166,7 +4163,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, | |||
4166 | * | 4163 | * |
4167 | * Restart the card from scratch, as if from a cold-boot. Implementation | 4164 | * Restart the card from scratch, as if from a cold-boot. Implementation |
4168 | * resembles the first-half of the ixgbevf_resume routine. | 4165 | * resembles the first-half of the ixgbevf_resume routine. |
4169 | */ | 4166 | **/ |
4170 | static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) | 4167 | static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) |
4171 | { | 4168 | { |
4172 | struct net_device *netdev = pci_get_drvdata(pdev); | 4169 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -4194,7 +4191,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) | |||
4194 | * This callback is called when the error recovery driver tells us that | 4191 | * This callback is called when the error recovery driver tells us that |
4195 | * its OK to resume normal operation. Implementation resembles the | 4192 | * its OK to resume normal operation. Implementation resembles the |
4196 | * second-half of the ixgbevf_resume routine. | 4193 | * second-half of the ixgbevf_resume routine. |
4197 | */ | 4194 | **/ |
4198 | static void ixgbevf_io_resume(struct pci_dev *pdev) | 4195 | static void ixgbevf_io_resume(struct pci_dev *pdev) |
4199 | { | 4196 | { |
4200 | struct net_device *netdev = pci_get_drvdata(pdev); | 4197 | struct net_device *netdev = pci_get_drvdata(pdev); |
@@ -4214,17 +4211,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { | |||
4214 | }; | 4211 | }; |
4215 | 4212 | ||
4216 | static struct pci_driver ixgbevf_driver = { | 4213 | static struct pci_driver ixgbevf_driver = { |
4217 | .name = ixgbevf_driver_name, | 4214 | .name = ixgbevf_driver_name, |
4218 | .id_table = ixgbevf_pci_tbl, | 4215 | .id_table = ixgbevf_pci_tbl, |
4219 | .probe = ixgbevf_probe, | 4216 | .probe = ixgbevf_probe, |
4220 | .remove = ixgbevf_remove, | 4217 | .remove = ixgbevf_remove, |
4221 | #ifdef CONFIG_PM | 4218 | #ifdef CONFIG_PM |
4222 | /* Power Management Hooks */ | 4219 | /* Power Management Hooks */ |
4223 | .suspend = ixgbevf_suspend, | 4220 | .suspend = ixgbevf_suspend, |
4224 | .resume = ixgbevf_resume, | 4221 | .resume = ixgbevf_resume, |
4225 | #endif | 4222 | #endif |
4226 | .shutdown = ixgbevf_shutdown, | 4223 | .shutdown = ixgbevf_shutdown, |
4227 | .err_handler = &ixgbevf_err_handler | 4224 | .err_handler = &ixgbevf_err_handler |
4228 | }; | 4225 | }; |
4229 | 4226 | ||
4230 | /** | 4227 | /** |
@@ -4236,6 +4233,7 @@ static struct pci_driver ixgbevf_driver = { | |||
4236 | static int __init ixgbevf_init_module(void) | 4233 | static int __init ixgbevf_init_module(void) |
4237 | { | 4234 | { |
4238 | int ret; | 4235 | int ret; |
4236 | |||
4239 | pr_info("%s - version %s\n", ixgbevf_driver_string, | 4237 | pr_info("%s - version %s\n", ixgbevf_driver_string, |
4240 | ixgbevf_driver_version); | 4238 | ixgbevf_driver_version); |
4241 | 4239 | ||
@@ -4266,6 +4264,7 @@ static void __exit ixgbevf_exit_module(void) | |||
4266 | char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) | 4264 | char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) |
4267 | { | 4265 | { |
4268 | struct ixgbevf_adapter *adapter = hw->back; | 4266 | struct ixgbevf_adapter *adapter = hw->back; |
4267 | |||
4269 | return adapter->netdev->name; | 4268 | return adapter->netdev->name; |
4270 | } | 4269 | } |
4271 | 4270 | ||
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index d5028ddf4b31..dc68fea4894b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2012 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) | |||
52 | } | 51 | } |
53 | 52 | ||
54 | /** | 53 | /** |
55 | * ixgbevf_poll_for_ack - Wait for message acknowledgement | 54 | * ixgbevf_poll_for_ack - Wait for message acknowledgment |
56 | * @hw: pointer to the HW structure | 55 | * @hw: pointer to the HW structure |
57 | * | 56 | * |
58 | * returns 0 if it successfully received a message acknowledgement | 57 | * returns 0 if it successfully received a message acknowledgment |
59 | **/ | 58 | **/ |
60 | static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) | 59 | static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) |
61 | { | 60 | { |
@@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) | |||
213 | s32 ret_val = IXGBE_ERR_MBX; | 212 | s32 ret_val = IXGBE_ERR_MBX; |
214 | 213 | ||
215 | if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | | 214 | if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | |
216 | IXGBE_VFMAILBOX_RSTI))) { | 215 | IXGBE_VFMAILBOX_RSTI))) { |
217 | ret_val = 0; | 216 | ret_val = 0; |
218 | hw->mbx.stats.rsts++; | 217 | hw->mbx.stats.rsts++; |
219 | } | 218 | } |
@@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) | |||
234 | /* Take ownership of the buffer */ | 233 | /* Take ownership of the buffer */ |
235 | IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); | 234 | IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); |
236 | 235 | ||
237 | /* reserve mailbox for vf use */ | 236 | /* reserve mailbox for VF use */ |
238 | if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) | 237 | if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) |
239 | ret_val = 0; | 238 | ret_val = 0; |
240 | 239 | ||
@@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) | |||
254 | s32 ret_val; | 253 | s32 ret_val; |
255 | u16 i; | 254 | u16 i; |
256 | 255 | ||
257 | 256 | /* lock the mailbox to prevent PF/VF race condition */ | |
258 | /* lock the mailbox to prevent pf/vf race condition */ | ||
259 | ret_val = ixgbevf_obtain_mbx_lock_vf(hw); | 257 | ret_val = ixgbevf_obtain_mbx_lock_vf(hw); |
260 | if (ret_val) | 258 | if (ret_val) |
261 | goto out_no_write; | 259 | goto out_no_write; |
@@ -279,7 +277,7 @@ out_no_write: | |||
279 | } | 277 | } |
280 | 278 | ||
281 | /** | 279 | /** |
282 | * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf | 280 | * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF |
283 | * @hw: pointer to the HW structure | 281 | * @hw: pointer to the HW structure |
284 | * @msg: The message buffer | 282 | * @msg: The message buffer |
285 | * @size: Length of buffer | 283 | * @size: Length of buffer |
@@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) | |||
291 | s32 ret_val = 0; | 289 | s32 ret_val = 0; |
292 | u16 i; | 290 | u16 i; |
293 | 291 | ||
294 | /* lock the mailbox to prevent pf/vf race condition */ | 292 | /* lock the mailbox to prevent PF/VF race condition */ |
295 | ret_val = ixgbevf_obtain_mbx_lock_vf(hw); | 293 | ret_val = ixgbevf_obtain_mbx_lock_vf(hw); |
296 | if (ret_val) | 294 | if (ret_val) |
297 | goto out_no_read; | 295 | goto out_no_read; |
@@ -311,17 +309,18 @@ out_no_read: | |||
311 | } | 309 | } |
312 | 310 | ||
313 | /** | 311 | /** |
314 | * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox | 312 | * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox |
315 | * @hw: pointer to the HW structure | 313 | * @hw: pointer to the HW structure |
316 | * | 314 | * |
317 | * Initializes the hw->mbx struct to correct values for vf mailbox | 315 | * Initializes the hw->mbx struct to correct values for VF mailbox |
318 | */ | 316 | */ |
319 | static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) | 317 | static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) |
320 | { | 318 | { |
321 | struct ixgbe_mbx_info *mbx = &hw->mbx; | 319 | struct ixgbe_mbx_info *mbx = &hw->mbx; |
322 | 320 | ||
323 | /* start mailbox as timed out and let the reset_hw call set the timeout | 321 | /* start mailbox as timed out and let the reset_hw call set the timeout |
324 | * value to begin communications */ | 322 | * value to begin communications |
323 | */ | ||
325 | mbx->timeout = 0; | 324 | mbx->timeout = 0; |
326 | mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; | 325 | mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; |
327 | 326 | ||
@@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) | |||
337 | } | 336 | } |
338 | 337 | ||
339 | const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { | 338 | const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { |
340 | .init_params = ixgbevf_init_mbx_params_vf, | 339 | .init_params = ixgbevf_init_mbx_params_vf, |
341 | .read = ixgbevf_read_mbx_vf, | 340 | .read = ixgbevf_read_mbx_vf, |
342 | .write = ixgbevf_write_mbx_vf, | 341 | .write = ixgbevf_write_mbx_vf, |
343 | .read_posted = ixgbevf_read_posted_mbx, | 342 | .read_posted = ixgbevf_read_posted_mbx, |
344 | .write_posted = ixgbevf_write_posted_mbx, | 343 | .write_posted = ixgbevf_write_posted_mbx, |
345 | .check_for_msg = ixgbevf_check_for_msg_vf, | 344 | .check_for_msg = ixgbevf_check_for_msg_vf, |
346 | .check_for_ack = ixgbevf_check_for_ack_vf, | 345 | .check_for_ack = ixgbevf_check_for_ack_vf, |
347 | .check_for_rst = ixgbevf_check_for_rst_vf, | 346 | .check_for_rst = ixgbevf_check_for_rst_vf, |
348 | }; | 347 | }; |
349 | 348 | ||
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 0bc30058ff82..6253e9335cae 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2012 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -30,56 +29,54 @@ | |||
30 | 29 | ||
31 | #include "vf.h" | 30 | #include "vf.h" |
32 | 31 | ||
33 | #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | 32 | #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ |
34 | #define IXGBE_ERR_MBX -100 | 33 | #define IXGBE_ERR_MBX -100 |
35 | 34 | ||
36 | #define IXGBE_VFMAILBOX 0x002FC | 35 | #define IXGBE_VFMAILBOX 0x002FC |
37 | #define IXGBE_VFMBMEM 0x00200 | 36 | #define IXGBE_VFMBMEM 0x00200 |
38 | 37 | ||
39 | /* Define mailbox register bits */ | 38 | /* Define mailbox register bits */ |
40 | #define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ | 39 | #define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ |
41 | #define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ | 40 | #define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ |
42 | #define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 41 | #define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
43 | #define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | 42 | #define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ |
44 | #define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ | 43 | #define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ |
45 | #define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ | 44 | #define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ |
46 | #define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ | 45 | #define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ |
47 | #define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ | 46 | #define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ |
48 | #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ | 47 | #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ |
49 | 48 | ||
50 | #define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) | 49 | #define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) |
51 | #define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) | 50 | #define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) |
52 | 51 | ||
53 | #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ | 52 | #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ |
54 | #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ | 53 | #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ |
55 | #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 54 | #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
56 | #define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | 55 | #define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ |
57 | #define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ | 56 | #define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ |
58 | 57 | ||
59 | #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ | 58 | #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ |
60 | #define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ | 59 | #define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ |
61 | #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ | 60 | #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ |
62 | #define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ | 61 | #define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ |
63 | |||
64 | 62 | ||
65 | /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the | 63 | /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the |
66 | * PF. The reverse is true if it is IXGBE_PF_*. | 64 | * PF. The reverse is true if it is IXGBE_PF_*. |
67 | * Message ACK's are the value or'd with 0xF0000000 | 65 | * Message ACK's are the value or'd with 0xF0000000 |
68 | */ | 66 | */ |
69 | #define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | 67 | /* Messages below or'd with this are the ACK */ |
70 | * this are the ACK */ | 68 | #define IXGBE_VT_MSGTYPE_ACK 0x80000000 |
71 | #define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | 69 | /* Messages below or'd with this are the NACK */ |
72 | * this are the NACK */ | 70 | #define IXGBE_VT_MSGTYPE_NACK 0x40000000 |
73 | #define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | 71 | /* Indicates that VF is still clear to send requests */ |
74 | * clear to send requests */ | 72 | #define IXGBE_VT_MSGTYPE_CTS 0x20000000 |
75 | #define IXGBE_VT_MSGINFO_SHIFT 16 | 73 | #define IXGBE_VT_MSGINFO_SHIFT 16 |
76 | /* bits 23:16 are used for exra info for certain messages */ | 74 | /* bits 23:16 are used for exra info for certain messages */ |
77 | #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) | 75 | #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) |
78 | 76 | ||
79 | /* definitions to support mailbox API version negotiation */ | 77 | /* definitions to support mailbox API version negotiation */ |
80 | 78 | ||
81 | /* | 79 | /* each element denotes a version of the API; existing numbers may not |
82 | * each element denotes a version of the API; existing numbers may not | ||
83 | * change; any additions must go at the end | 80 | * change; any additions must go at the end |
84 | */ | 81 | */ |
85 | enum ixgbe_pfvf_api_rev { | 82 | enum ixgbe_pfvf_api_rev { |
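The reworked defines above describe the mailbox message word: the command sits in the low bits, extra info in bits 23:16, and the PF ORs the ACK or NACK flag on top of its reply. A sketch of how those pieces combine, using the bit values from this file; the particular command and info value are only examples of the encoding, not a quoted exchange:

    #include <stdio.h>
    #include <stdint.h>

    #define VT_MSGTYPE_ACK   0x80000000u
    #define VT_MSGTYPE_NACK  0x40000000u
    #define VT_MSGINFO_SHIFT 16
    #define VT_MSGINFO_MASK  (0xFFu << VT_MSGINFO_SHIFT)

    #define VF_SET_MAC_ADDR  0x02u           /* command value from this file */

    int main(void)
    {
        uint32_t request = VF_SET_MAC_ADDR | (1u << VT_MSGINFO_SHIFT);
        uint32_t reply   = request | VT_MSGTYPE_ACK;   /* a successful reply */
        uint32_t cmd     = reply &
                           ~(VT_MSGINFO_MASK | VT_MSGTYPE_ACK | VT_MSGTYPE_NACK);

        printf("command %#x was %s\n", (unsigned)cmd,
               (reply & VT_MSGTYPE_ACK) ? "ACKed" : "NACKed");
        return 0;
    }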
@@ -91,10 +88,10 @@ enum ixgbe_pfvf_api_rev { | |||
91 | }; | 88 | }; |
92 | 89 | ||
93 | /* mailbox API, legacy requests */ | 90 | /* mailbox API, legacy requests */ |
94 | #define IXGBE_VF_RESET 0x01 /* VF requests reset */ | 91 | #define IXGBE_VF_RESET 0x01 /* VF requests reset */ |
95 | #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | 92 | #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ |
96 | #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | 93 | #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ |
97 | #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | 94 | #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ |
98 | 95 | ||
99 | /* mailbox API, version 1.0 VF requests */ | 96 | /* mailbox API, version 1.0 VF requests */ |
100 | #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | 97 | #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ |
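Each request ID above occupies word 0 of a mailbox message, with any payload in the following words. A rough sketch of how the VF builds and posts a MAC-address request, patterned on ixgbevf_set_rar_vf; treat the buffer size and the 6-byte copy as illustrative rather than verbatim driver code:

	u32 msgbuf[3];
	s32 err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;		/* request ID in word 0 */
	memcpy(&msgbuf[1], addr, 6);			/* MAC address as the payload */

	err = hw->mbx.ops.write_posted(hw, msgbuf, 3);	/* send and wait for the PF */
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msgbuf, 3);	/* collect the ACK/NACK */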
@@ -105,20 +102,20 @@ enum ixgbe_pfvf_api_rev { | |||
105 | #define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ | 102 | #define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ |
106 | 103 | ||
107 | /* GET_QUEUES return data indices within the mailbox */ | 104 | /* GET_QUEUES return data indices within the mailbox */ |
108 | #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ | 105 | #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ |
109 | #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ | 106 | #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ |
110 | #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ | 107 | #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ |
111 | #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ | 108 | #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ |
112 | 109 | ||
113 | /* length of permanent address message returned from PF */ | 110 | /* length of permanent address message returned from PF */ |
114 | #define IXGBE_VF_PERMADDR_MSG_LEN 4 | 111 | #define IXGBE_VF_PERMADDR_MSG_LEN 4 |
115 | /* word in permanent address message with the current multicast type */ | 112 | /* word in permanent address message with the current multicast type */ |
116 | #define IXGBE_VF_MC_TYPE_WORD 3 | 113 | #define IXGBE_VF_MC_TYPE_WORD 3 |
117 | 114 | ||
118 | #define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ | 115 | #define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ |
119 | 116 | ||
120 | #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ | 117 | #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ |
121 | #define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ | 118 | #define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ |
122 | 119 | ||
123 | /* forward declaration of the HW struct */ | 120 | /* forward declaration of the HW struct */ |
124 | struct ixgbe_hw; | 121 | struct ixgbe_hw; |
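The GET_QUEUES indices above name word positions in the PF's reply. Once the reply has been validated, the VF side pulls them apart roughly like this (a sketch of ixgbevf_get_queues' unpacking, not the verbatim body):

	/* msg[0] carries the echoed IXGBE_VF_GET_QUEUE ID; data starts at word 1 */
	hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
	hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
	*num_tcs = msg[IXGBE_VF_TRANS_VLAN];	/* non-zero when a port VLAN is active */
	*default_tc = msg[IXGBE_VF_DEF_QUEUE];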
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 3e712fd6e695..2764fd16261f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2014 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -28,58 +27,58 @@ | |||
28 | #ifndef _IXGBEVF_REGS_H_ | 27 | #ifndef _IXGBEVF_REGS_H_ |
29 | #define _IXGBEVF_REGS_H_ | 28 | #define _IXGBEVF_REGS_H_ |
30 | 29 | ||
31 | #define IXGBE_VFCTRL 0x00000 | 30 | #define IXGBE_VFCTRL 0x00000 |
32 | #define IXGBE_VFSTATUS 0x00008 | 31 | #define IXGBE_VFSTATUS 0x00008 |
33 | #define IXGBE_VFLINKS 0x00010 | 32 | #define IXGBE_VFLINKS 0x00010 |
34 | #define IXGBE_VFFRTIMER 0x00048 | 33 | #define IXGBE_VFFRTIMER 0x00048 |
35 | #define IXGBE_VFRXMEMWRAP 0x03190 | 34 | #define IXGBE_VFRXMEMWRAP 0x03190 |
36 | #define IXGBE_VTEICR 0x00100 | 35 | #define IXGBE_VTEICR 0x00100 |
37 | #define IXGBE_VTEICS 0x00104 | 36 | #define IXGBE_VTEICS 0x00104 |
38 | #define IXGBE_VTEIMS 0x00108 | 37 | #define IXGBE_VTEIMS 0x00108 |
39 | #define IXGBE_VTEIMC 0x0010C | 38 | #define IXGBE_VTEIMC 0x0010C |
40 | #define IXGBE_VTEIAC 0x00110 | 39 | #define IXGBE_VTEIAC 0x00110 |
41 | #define IXGBE_VTEIAM 0x00114 | 40 | #define IXGBE_VTEIAM 0x00114 |
42 | #define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) | 41 | #define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) |
43 | #define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) | 42 | #define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) |
44 | #define IXGBE_VTIVAR_MISC 0x00140 | 43 | #define IXGBE_VTIVAR_MISC 0x00140 |
45 | #define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) | 44 | #define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) |
46 | #define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) | 45 | #define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) |
47 | #define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) | 46 | #define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) |
48 | #define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) | 47 | #define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) |
49 | #define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) | 48 | #define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) |
50 | #define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) | 49 | #define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) |
51 | #define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) | 50 | #define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) |
52 | #define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) | 51 | #define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) |
53 | #define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) | 52 | #define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) |
54 | #define IXGBE_VFPSRTYPE 0x00300 | 53 | #define IXGBE_VFPSRTYPE 0x00300 |
55 | #define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) | 54 | #define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) |
56 | #define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) | 55 | #define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) |
57 | #define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) | 56 | #define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) |
58 | #define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) | 57 | #define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) |
59 | #define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) | 58 | #define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) |
60 | #define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) | 59 | #define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) |
61 | #define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) | 60 | #define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) |
62 | #define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) | 61 | #define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) |
63 | #define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) | 62 | #define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) |
64 | #define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) | 63 | #define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) |
65 | #define IXGBE_VFGPRC 0x0101C | 64 | #define IXGBE_VFGPRC 0x0101C |
66 | #define IXGBE_VFGPTC 0x0201C | 65 | #define IXGBE_VFGPTC 0x0201C |
67 | #define IXGBE_VFGORC_LSB 0x01020 | 66 | #define IXGBE_VFGORC_LSB 0x01020 |
68 | #define IXGBE_VFGORC_MSB 0x01024 | 67 | #define IXGBE_VFGORC_MSB 0x01024 |
69 | #define IXGBE_VFGOTC_LSB 0x02020 | 68 | #define IXGBE_VFGOTC_LSB 0x02020 |
70 | #define IXGBE_VFGOTC_MSB 0x02024 | 69 | #define IXGBE_VFGOTC_MSB 0x02024 |
71 | #define IXGBE_VFMPRC 0x01034 | 70 | #define IXGBE_VFMPRC 0x01034 |
72 | #define IXGBE_VFMRQC 0x3000 | 71 | #define IXGBE_VFMRQC 0x3000 |
73 | #define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) | 72 | #define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) |
74 | #define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) | 73 | #define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) |
75 | 74 | ||
76 | /* VFMRQC bits */ | 75 | /* VFMRQC bits */ |
77 | #define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ | 76 | #define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ |
78 | #define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 | 77 | #define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 |
79 | #define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 | 78 | #define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 |
80 | #define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 | 79 | #define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 |
81 | #define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 | 80 | #define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 |
82 | 81 | ||
83 | #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) | 82 | #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) |
84 | 83 | ||
85 | #endif /* _IXGBEVF_REGS_H_ */ | 84 | #endif /* _IXGBEVF_REGS_H_ */ |
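The parameterized macros above space each queue's registers 0x40 bytes apart, and IXGBE_WRITE_FLUSH is simply a read of VFSTATUS that forces posted writes to complete. A minimal usage sketch; reg_idx and next_to_use are illustrative names rather than fields taken from this patch:

	/* advance the Rx tail for queue reg_idx, then flush the posted write */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), next_to_use);
	IXGBE_WRITE_FLUSH(hw);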
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index f510a5822f90..0d7da03014c2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2012 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) | |||
102 | 101 | ||
103 | mdelay(10); | 102 | mdelay(10); |
104 | 103 | ||
105 | /* set our "perm_addr" based on info provided by PF */ | 104 | /* set our "perm_addr" based on info provided by PF |
106 | /* also set up the mc_filter_type which is piggy backed | 105 | * also set up the mc_filter_type which is piggy backed |
107 | * on the mac address in word 3 */ | 106 | * on the mac address in word 3 |
107 | */ | ||
108 | ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); | 108 | ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); |
109 | if (ret_val) | 109 | if (ret_val) |
110 | return ret_val; | 110 | return ret_val; |
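The IXGBE_VF_PERMADDR_MSG_LEN read above returns the permanent MAC address in words 1-2 and, piggy-backed in word 3 (IXGBE_VF_MC_TYPE_WORD), the multicast filter type. The remainder of the reset path stores them roughly as follows (a sketch, not the verbatim function body):

	/* reply layout: word 0 = msg ID, words 1-2 = MAC, word 3 = mc filter type */
	memcpy(hw->mac.perm_addr, (u8 *)&msgbuf[1], 6);
	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];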
@@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) | |||
138 | u32 reg_val; | 138 | u32 reg_val; |
139 | u16 i; | 139 | u16 i; |
140 | 140 | ||
141 | /* | 141 | /* Set the adapter_stopped flag so other driver functions stop touching |
142 | * Set the adapter_stopped flag so other driver functions stop touching | ||
143 | * the hardware | 142 | * the hardware |
144 | */ | 143 | */ |
145 | hw->adapter_stopped = true; | 144 | hw->adapter_stopped = true; |
@@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) | |||
182 | * | 181 | * |
183 | * Extracts the 12 bits, from a multicast address, to determine which | 182 | * Extracts the 12 bits, from a multicast address, to determine which |
184 | * bit-vector to set in the multicast table. The hardware uses 12 bits, from | 183 | * bit-vector to set in the multicast table. The hardware uses 12 bits, from |
185 | * incoming rx multicast addresses, to determine the bit-vector to check in | 184 | * incoming Rx multicast addresses, to determine the bit-vector to check in |
186 | * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set | 185 | * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set |
187 | * by the MO field of the MCSTCTRL. The MO field is set during initialization | 186 | * by the MO field of the MCSTCTRL. The MO field is set during initialization |
188 | * to mc_filter_type. | 187 | * to mc_filter_type. |
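The kernel-doc above describes a 12-bit hash taken from the top of the multicast address, with mc_filter_type (the MO field) selecting which 12 bits. A sketch of that selection, reconstructed from the description; mc_addr is assumed to point at a 6-byte multicast address:

	u32 vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:	/* bits 47:36 of the address */
		vector = (mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4);
		break;
	case 1:	/* bits 46:35 */
		vector = (mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5);
		break;
	case 2:	/* bits 45:34 */
		vector = (mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6);
		break;
	case 3:	/* bits 43:32 */
		vector = mc_addr[4] | (((u16)mc_addr[5]) << 8);
		break;
	}
	vector &= 0xFFF;	/* keep only the 12-bit bit-vector index */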
@@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) | |||
233 | s32 ret_val; | 232 | s32 ret_val; |
234 | 233 | ||
235 | memset(msgbuf, 0, sizeof(msgbuf)); | 234 | memset(msgbuf, 0, sizeof(msgbuf)); |
236 | /* | 235 | /* If index is one then this is the start of a new list and needs |
237 | * If index is one then this is the start of a new list and needs | ||
238 | * indication to the PF so it can do its own list management. | 236 | * indication to the PF so it can do its own list management. |
239 | * If it is zero then that tells the PF to just clear all of | 237 | * If it is zero then that tells the PF to just clear all of |
240 | * this VF's macvlans and there is no new list. | 238 | * this VF's macvlans and there is no new list. |
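Building on the index semantics above: the index travels in the MSGINFO bits of word 0, so 1 starts a fresh list and 0 asks the PF to flush this VF's macvlans. A sketch of the message construction, assuming IXGBE_VF_SET_MACVLAN is the unicast-filter request ID from mbx.h:

	msgbuf[0] = IXGBE_VF_SET_MACVLAN;
	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;	/* index rides in bits 23:16 */
	if (addr)
		memcpy(&msgbuf[1], addr, 6);		/* address payload, when present */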
@@ -292,7 +290,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, | |||
292 | } | 290 | } |
293 | 291 | ||
294 | static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, | 292 | static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, |
295 | u32 *msg, u16 size) | 293 | u32 *msg, u16 size) |
296 | { | 294 | { |
297 | struct ixgbe_mbx_info *mbx = &hw->mbx; | 295 | struct ixgbe_mbx_info *mbx = &hw->mbx; |
298 | u32 retmsg[IXGBE_VFMAILBOX_SIZE]; | 296 | u32 retmsg[IXGBE_VFMAILBOX_SIZE]; |
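ixgbevf_write_msg_read_ack is a small convenience wrapper: post a message, then read back (and discard) the PF's reply so the mailbox is left clear for the next request. In rough form:

	/* post the request, then drain the PF's reply into a scratch buffer */
	mbx->ops.write_posted(hw, msg, size);
	mbx->ops.read_posted(hw, retmsg, size);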
@@ -348,7 +346,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, | |||
348 | } | 346 | } |
349 | 347 | ||
350 | /** | 348 | /** |
351 | * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address | 349 | * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address |
352 | * @hw: pointer to the HW structure | 350 | * @hw: pointer to the HW structure |
353 | * @vlan: 12 bit VLAN ID | 351 | * @vlan: 12 bit VLAN ID |
354 | * @vind: unused by VF drivers | 352 | * @vind: unused by VF drivers |
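For the VLAN filter update documented above, the VF again uses the MSGINFO field: the VLAN ID goes in word 1 and the add/remove flag in bits 23:16 of word 0. A sketch using the kernel-doc parameters vlan and vlan_on (the exact word layout is illustrative):

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;				/* 12-bit VLAN ID */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;	/* set vs. clear the filter */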
@@ -462,7 +460,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, | |||
462 | } | 460 | } |
463 | 461 | ||
464 | /* if the read failed it could just be a mailbox collision, best wait | 462 | /* if the read failed it could just be a mailbox collision, best wait |
465 | * until we are called again and don't report an error */ | 463 | * until we are called again and don't report an error |
464 | */ | ||
466 | if (mbx->ops.read(hw, &in_msg, 1)) | 465 | if (mbx->ops.read(hw, &in_msg, 1)) |
467 | goto out; | 466 | goto out; |
468 | 467 | ||
@@ -480,7 +479,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, | |||
480 | } | 479 | } |
481 | 480 | ||
482 | /* if we passed all the tests above then the link is up and we no | 481 | /* if we passed all the tests above then the link is up and we no |
483 | * longer need to check for link */ | 482 | * longer need to check for link |
483 | */ | ||
484 | mac->get_link_status = false; | 484 | mac->get_link_status = false; |
485 | 485 | ||
486 | out: | 486 | out: |
@@ -561,8 +561,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, | |||
561 | if (!err) { | 561 | if (!err) { |
562 | msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; | 562 | msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; |
563 | 563 | ||
564 | /* | 564 | /* if we didn't get an ACK there must have been |
565 | * if we didn't get an ACK there must have been | |||
566 | * some sort of mailbox error so we should treat it | 565 | * some sort of mailbox error so we should treat it |
567 | * as such | 566 | * as such |
568 | */ | 567 | */ |
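The comment above amounts to: strip the CTS bit, then require that what remains is the original request or'd with ACK; anything else is treated as a mailbox failure. Roughly (IXGBE_ERR_MBX stands in for the driver's mailbox error code):

	msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
		err = IXGBE_ERR_MBX;	/* no ACK: some sort of mailbox error */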
@@ -595,17 +594,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, | |||
595 | } | 594 | } |
596 | 595 | ||
597 | static const struct ixgbe_mac_operations ixgbevf_mac_ops = { | 596 | static const struct ixgbe_mac_operations ixgbevf_mac_ops = { |
598 | .init_hw = ixgbevf_init_hw_vf, | 597 | .init_hw = ixgbevf_init_hw_vf, |
599 | .reset_hw = ixgbevf_reset_hw_vf, | 598 | .reset_hw = ixgbevf_reset_hw_vf, |
600 | .start_hw = ixgbevf_start_hw_vf, | 599 | .start_hw = ixgbevf_start_hw_vf, |
601 | .get_mac_addr = ixgbevf_get_mac_addr_vf, | 600 | .get_mac_addr = ixgbevf_get_mac_addr_vf, |
602 | .stop_adapter = ixgbevf_stop_hw_vf, | 601 | .stop_adapter = ixgbevf_stop_hw_vf, |
603 | .setup_link = ixgbevf_setup_mac_link_vf, | 602 | .setup_link = ixgbevf_setup_mac_link_vf, |
604 | .check_link = ixgbevf_check_mac_link_vf, | 603 | .check_link = ixgbevf_check_mac_link_vf, |
605 | .set_rar = ixgbevf_set_rar_vf, | 604 | .set_rar = ixgbevf_set_rar_vf, |
606 | .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, | 605 | .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, |
607 | .set_uc_addr = ixgbevf_set_uc_addr_vf, | 606 | .set_uc_addr = ixgbevf_set_uc_addr_vf, |
608 | .set_vfta = ixgbevf_set_vfta_vf, | 607 | .set_vfta = ixgbevf_set_vfta_vf, |
609 | }; | 608 | }; |
610 | 609 | ||
611 | const struct ixgbevf_info ixgbevf_82599_vf_info = { | 610 | const struct ixgbevf_info ixgbevf_82599_vf_info = { |
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 5b172427f459..6688250da7a1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2014 Intel Corporation. | 4 | Copyright(c) 1999 - 2015 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -169,7 +168,7 @@ struct ixgbevf_hw_stats { | |||
169 | }; | 168 | }; |
170 | 169 | ||
171 | struct ixgbevf_info { | 170 | struct ixgbevf_info { |
172 | enum ixgbe_mac_type mac; | 171 | enum ixgbe_mac_type mac; |
173 | const struct ixgbe_mac_operations *mac_ops; | 172 | const struct ixgbe_mac_operations *mac_ops; |
174 | }; | 173 | }; |
175 | 174 | ||
@@ -185,23 +184,26 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | |||
185 | return; | 184 | return; |
186 | writel(value, reg_addr + reg); | 185 | writel(value, reg_addr + reg); |
187 | } | 186 | } |
187 | |||
188 | #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) | 188 | #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) |
189 | 189 | ||
190 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); | 190 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); |
191 | #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) | 191 | #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) |
192 | 192 | ||
193 | static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, | 193 | static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, |
194 | u32 offset, u32 value) | 194 | u32 offset, u32 value) |
195 | { | 195 | { |
196 | ixgbe_write_reg(hw, reg + (offset << 2), value); | 196 | ixgbe_write_reg(hw, reg + (offset << 2), value); |
197 | } | 197 | } |
198 | |||
198 | #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) | 199 | #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) |
199 | 200 | ||
200 | static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, | 201 | static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, |
201 | u32 offset) | 202 | u32 offset) |
202 | { | 203 | { |
203 | return ixgbevf_read_reg(hw, reg + (offset << 2)); | 204 | return ixgbevf_read_reg(hw, reg + (offset << 2)); |
204 | } | 205 | } |
206 | |||
205 | #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) | 207 | #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) |
206 | 208 | ||
207 | void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); | 209 | void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); |
@@ -209,4 +211,3 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); | |||
209 | int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, | 211 | int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, |
210 | unsigned int *default_tc); | 212 | unsigned int *default_tc); |
211 | #endif /* __IXGBE_VF_H__ */ | 213 | #endif /* __IXGBE_VF_H__ */ |
212 | |||