aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-01-23 19:29:26 -0500
committerDavid S. Miller <davem@davemloft.net>2018-01-23 19:29:26 -0500
commit0542e13b5f5663ffdc18e0e028413b2cd09f426f (patch)
tree9e0591bb75265112035065654faf5230f01692e5
parentc89b517da1b7929bc13d9b9c9dca8183022b4804 (diff)
parent85bc2663a5df852ade80fa328c21716a8fe132f6 (diff)
Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says: ==================== 10GbE Intel Wired LAN Driver Updates 2018-01-23 This series contains updates to ixgbe only. Shannon Nelson provides an implementation of the ipsec hardware offload feature for the ixgbe driver for these devices: x540, x550, 82599. The ixgbe NICs support ipsec offload for 1024 Rx and 1024 Tx Security Associations (SAs), using up to 128 inbound IP addresses, and using the rfc4106(gcm(aes)) encryption. This code does not yet support checksum offload, or TSO in conjunction with the ipsec offload - those will be added in the future. This code shows improvements in both packet throughput and CPU utilization. For example, here are some quick numbers that show the magnitude of the performance gain on a single run of "iperf -c <dest>" with the ipsec offload on both ends of a point-to-point connection: 9.4 Gbps - normal case 7.6 Gbps - ipsec with offload 343 Mbps - ipsec no offload ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c941
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h93
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h22
8 files changed, 1116 insertions, 23 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 35e6fa643c7e..8319465eb38d 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -42,3 +42,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o 42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
43ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o 43ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
45ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 49ab0c7a9cd5..c1e3a0039ea5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -52,6 +52,7 @@
52#ifdef CONFIG_IXGBE_DCA 52#ifdef CONFIG_IXGBE_DCA
53#include <linux/dca.h> 53#include <linux/dca.h>
54#endif 54#endif
55#include "ixgbe_ipsec.h"
55 56
56#include <net/xdp.h> 57#include <net/xdp.h>
57#include <net/busy_poll.h> 58#include <net/busy_poll.h>
@@ -171,10 +172,11 @@ enum ixgbe_tx_flags {
171 IXGBE_TX_FLAGS_CC = 0x08, 172 IXGBE_TX_FLAGS_CC = 0x08,
172 IXGBE_TX_FLAGS_IPV4 = 0x10, 173 IXGBE_TX_FLAGS_IPV4 = 0x10,
173 IXGBE_TX_FLAGS_CSUM = 0x20, 174 IXGBE_TX_FLAGS_CSUM = 0x20,
175 IXGBE_TX_FLAGS_IPSEC = 0x40,
174 176
175 /* software defined flags */ 177 /* software defined flags */
176 IXGBE_TX_FLAGS_SW_VLAN = 0x40, 178 IXGBE_TX_FLAGS_SW_VLAN = 0x80,
177 IXGBE_TX_FLAGS_FCOE = 0x80, 179 IXGBE_TX_FLAGS_FCOE = 0x100,
178}; 180};
179 181
180/* VLAN info */ 182/* VLAN info */
@@ -629,15 +631,18 @@ struct ixgbe_adapter {
629#define IXGBE_FLAG2_EEE_CAPABLE BIT(14) 631#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
630#define IXGBE_FLAG2_EEE_ENABLED BIT(15) 632#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
631#define IXGBE_FLAG2_RX_LEGACY BIT(16) 633#define IXGBE_FLAG2_RX_LEGACY BIT(16)
634#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
632 635
633 /* Tx fast path data */ 636 /* Tx fast path data */
634 int num_tx_queues; 637 int num_tx_queues;
635 u16 tx_itr_setting; 638 u16 tx_itr_setting;
636 u16 tx_work_limit; 639 u16 tx_work_limit;
640 u64 tx_ipsec;
637 641
638 /* Rx fast path data */ 642 /* Rx fast path data */
639 int num_rx_queues; 643 int num_rx_queues;
640 u16 rx_itr_setting; 644 u16 rx_itr_setting;
645 u64 rx_ipsec;
641 646
642 /* Port number used to identify VXLAN traffic */ 647 /* Port number used to identify VXLAN traffic */
643 __be16 vxlan_port; 648 __be16 vxlan_port;
@@ -781,6 +786,10 @@ struct ixgbe_adapter {
781 786
782#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ 787#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
783 u32 *rss_key; 788 u32 *rss_key;
789
790#ifdef CONFIG_XFRM
791 struct ixgbe_ipsec *ipsec;
792#endif /* CONFIG_XFRM */
784}; 793};
785 794
786static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) 795static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1011,4 +1020,24 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
1011void ixgbe_store_reta(struct ixgbe_adapter *adapter); 1020void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1012s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, 1021s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1013 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); 1022 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
1023#ifdef CONFIG_XFRM_OFFLOAD
1024void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
1025void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
1026void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
1027void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1028 union ixgbe_adv_rx_desc *rx_desc,
1029 struct sk_buff *skb);
1030int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
1031 struct ixgbe_ipsec_tx_data *itd);
1032#else
1033static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
1034static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
1035static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
1036static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1037 union ixgbe_adv_rx_desc *rx_desc,
1038 struct sk_buff *skb) { };
1039static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
1040 struct ixgbe_tx_buffer *first,
1041 struct ixgbe_ipsec_tx_data *itd) { return 0; };
1042#endif /* CONFIG_XFRM_OFFLOAD */
1014#endif /* _IXGBE_H_ */ 1043#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f064099733b6..317351025fd7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -115,6 +115,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
115 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)}, 115 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
116 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)}, 116 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
117 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)}, 117 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
118 {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
119 {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
118#ifdef IXGBE_FCOE 120#ifdef IXGBE_FCOE
119 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 121 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
120 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 122 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
new file mode 100644
index 000000000000..93eacddb6704
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -0,0 +1,941 @@
1/*******************************************************************************
2 *
3 * Intel 10 Gigabit PCI Express Linux driver
4 * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * Linux NICS <linux.nics@intel.com>
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include "ixgbe.h"
29#include <net/xfrm.h>
30#include <crypto/aead.h>
31
/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array - 4 words, loaded into hw in reverse word order
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	/* stage the key and salt; the words are loaded in reverse order
	 * and byteswapped into the layout the hw expects
	 */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	/* commit the staged data: keep only the enable bit, set the
	 * target index, and strobe WRITE
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
56
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	/* keep only the enable bit, then select the table and index
	 * and strobe WRITE to commit the previously staged data
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
79
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array - 4 words, loaded into hw in reverse word order
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 *
 * The SPI/IPidx pair and the key/salt/mode set are committed to the
 * hw as two separate Rx table writes.
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}
111
/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array - 4 words; callers put an IPv4 address
 *        in addr[3] (see ixgbe_ipsec_add_sa)
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
129
/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 *
 * Disables SA lookup and zeroes every entry of the Tx SA, Rx SA, and
 * Rx IP tables, then resets the driver's SA counts to match.
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table;
	 * assumes MAX_RX_IP_COUNT <= MAX_SA_COUNT
	 */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	/* no SAs remain after a scrub */
	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}
159
/**
 * ixgbe_ipsec_stop_data - halt the ipsec Tx and Rx data paths
 * @adapter: board private structure
 *
 * Sets the security block TX/RX disable bits and waits for the paths
 * to drain. Without link the Tx fifo cannot drain on its own, so MAC
 * loopback is temporarily forced while the block empties, then undone.
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block. Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty - polls the SECTX/SECRX ready bits,
	 * roughly 200ms worst case
	 */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}
223
/**
 * ixgbe_ipsec_stop_engine - disable the ipsec hw offload engine
 * @adapter: board private structure
 *
 * Drains the data paths, disables SA lookup, and restores the security
 * block registers to their normal (no ipsec offload) settings.
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}
263
/**
 * ixgbe_ipsec_start_engine - get the ipsec hw offload engine running
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	/* quiesce the data paths before reprogramming */
	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}
300
/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 *
 * Re-initializes the engine and reprograms every table entry that the
 * driver's software tables still mark as used.
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* nothing to do if the offload engine was never enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}
341
342/**
343 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
344 * @ipsec: pointer to ipsec struct
345 * @rxtable: true if we need to look in the Rx table
346 *
347 * Returns the first unused index in either the Rx or Tx SA table
348 **/
349static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
350{
351 u32 i;
352
353 if (rxtable) {
354 if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
355 return -ENOSPC;
356
357 /* search rx sa table */
358 for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
359 if (!ipsec->rx_tbl[i].used)
360 return i;
361 }
362 } else {
363 if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
364 return -ENOSPC;
365
366 /* search tx sa table */
367 for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
368 if (!ipsec->tx_tbl[i].used)
369 return i;
370 }
371 }
372
373 return -ENOSPC;
374}
375
/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information, with a
 * reference held (caller is responsible for releasing it), or NULL
 * if no entry matches.
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	/* walk the SPI-keyed hash bucket under RCU and confirm the SPI,
	 * destination address, and protocol all match
	 */
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			/* hold a reference so the state can't go away
			 * while the caller is still using it
			 */
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();
	return ret;
}
407
408/**
409 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
410 * @xs: pointer to xfrm_state struct
411 * @mykey: pointer to key array to populate
412 * @mysalt: pointer to salt value to populate
413 *
414 * This copies the protocol keys and salt to our own data tables. The
415 * 82599 family only supports the one algorithm.
416 **/
417static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
418 u32 *mykey, u32 *mysalt)
419{
420 struct net_device *dev = xs->xso.dev;
421 unsigned char *key_data;
422 char *alg_name = NULL;
423 const char aes_gcm_name[] = "rfc4106(gcm(aes))";
424 int key_len;
425
426 if (xs->aead) {
427 key_data = &xs->aead->alg_key[0];
428 key_len = xs->aead->alg_key_len;
429 alg_name = xs->aead->alg_name;
430 } else {
431 netdev_err(dev, "Unsupported IPsec algorithm\n");
432 return -EINVAL;
433 }
434
435 if (strcmp(alg_name, aes_gcm_name)) {
436 netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
437 aes_gcm_name);
438 return -EINVAL;
439 }
440
441 /* The key bytes come down in a bigendian array of bytes, so
442 * we don't need to do any byteswapping.
443 * 160 accounts for 16 byte key and 4 byte salt
444 */
445 if (key_len == 160) {
446 *mysalt = ((u32 *)key_data)[4];
447 } else if (key_len != 128) {
448 netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
449 return -EINVAL;
450 } else {
451 netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
452 *mysalt = 0;
453 }
454 memcpy(mykey, key_data, 16);
455
456 return 0;
457}
458
459/**
460 * ixgbe_ipsec_add_sa - program device with a security association
461 * @xs: pointer to transformer state struct
462 **/
463static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
464{
465 struct net_device *dev = xs->xso.dev;
466 struct ixgbe_adapter *adapter = netdev_priv(dev);
467 struct ixgbe_ipsec *ipsec = adapter->ipsec;
468 struct ixgbe_hw *hw = &adapter->hw;
469 int checked, match, first;
470 u16 sa_idx;
471 int ret;
472 int i;
473
474 if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
475 netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
476 xs->id.proto);
477 return -EINVAL;
478 }
479
480 if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
481 struct rx_sa rsa;
482
483 if (xs->calg) {
484 netdev_err(dev, "Compression offload not supported\n");
485 return -EINVAL;
486 }
487
488 /* find the first unused index */
489 ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
490 if (ret < 0) {
491 netdev_err(dev, "No space for SA in Rx table!\n");
492 return ret;
493 }
494 sa_idx = (u16)ret;
495
496 memset(&rsa, 0, sizeof(rsa));
497 rsa.used = true;
498 rsa.xs = xs;
499
500 if (rsa.xs->id.proto & IPPROTO_ESP)
501 rsa.decrypt = xs->ealg || xs->aead;
502
503 /* get the key and salt */
504 ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
505 if (ret) {
506 netdev_err(dev, "Failed to get key data for Rx SA table\n");
507 return ret;
508 }
509
510 /* get ip for rx sa table */
511 if (xs->props.family == AF_INET6)
512 memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
513 else
514 memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
515
516 /* The HW does not have a 1:1 mapping from keys to IP addrs, so
517 * check for a matching IP addr entry in the table. If the addr
518 * already exists, use it; else find an unused slot and add the
519 * addr. If one does not exist and there are no unused table
520 * entries, fail the request.
521 */
522
523 /* Find an existing match or first not used, and stop looking
524 * after we've checked all we know we have.
525 */
526 checked = 0;
527 match = -1;
528 first = -1;
529 for (i = 0;
530 i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
531 (checked < ipsec->num_rx_sa || first < 0);
532 i++) {
533 if (ipsec->ip_tbl[i].used) {
534 if (!memcmp(ipsec->ip_tbl[i].ipaddr,
535 rsa.ipaddr, sizeof(rsa.ipaddr))) {
536 match = i;
537 break;
538 }
539 checked++;
540 } else if (first < 0) {
541 first = i; /* track the first empty seen */
542 }
543 }
544
545 if (ipsec->num_rx_sa == 0)
546 first = 0;
547
548 if (match >= 0) {
549 /* addrs are the same, we should use this one */
550 rsa.iptbl_ind = match;
551 ipsec->ip_tbl[match].ref_cnt++;
552
553 } else if (first >= 0) {
554 /* no matches, but here's an empty slot */
555 rsa.iptbl_ind = first;
556
557 memcpy(ipsec->ip_tbl[first].ipaddr,
558 rsa.ipaddr, sizeof(rsa.ipaddr));
559 ipsec->ip_tbl[first].ref_cnt = 1;
560 ipsec->ip_tbl[first].used = true;
561
562 ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
563
564 } else {
565 /* no match and no empty slot */
566 netdev_err(dev, "No space for SA in Rx IP SA table\n");
567 memset(&rsa, 0, sizeof(rsa));
568 return -ENOSPC;
569 }
570
571 rsa.mode = IXGBE_RXMOD_VALID;
572 if (rsa.xs->id.proto & IPPROTO_ESP)
573 rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
574 if (rsa.decrypt)
575 rsa.mode |= IXGBE_RXMOD_DECRYPT;
576 if (rsa.xs->props.family == AF_INET6)
577 rsa.mode |= IXGBE_RXMOD_IPV6;
578
579 /* the preparations worked, so save the info */
580 memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
581
582 ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
583 rsa.salt, rsa.mode, rsa.iptbl_ind);
584 xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
585
586 ipsec->num_rx_sa++;
587
588 /* hash the new entry for faster search in Rx path */
589 hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
590 rsa.xs->id.spi);
591 } else {
592 struct tx_sa tsa;
593
594 /* find the first unused index */
595 ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
596 if (ret < 0) {
597 netdev_err(dev, "No space for SA in Tx table\n");
598 return ret;
599 }
600 sa_idx = (u16)ret;
601
602 memset(&tsa, 0, sizeof(tsa));
603 tsa.used = true;
604 tsa.xs = xs;
605
606 if (xs->id.proto & IPPROTO_ESP)
607 tsa.encrypt = xs->ealg || xs->aead;
608
609 ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
610 if (ret) {
611 netdev_err(dev, "Failed to get key data for Tx SA table\n");
612 memset(&tsa, 0, sizeof(tsa));
613 return ret;
614 }
615
616 /* the preparations worked, so save the info */
617 memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
618
619 ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
620
621 xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
622
623 ipsec->num_tx_sa++;
624 }
625
626 /* enable the engine if not already warmed up */
627 if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
628 ixgbe_ipsec_start_engine(adapter);
629 adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
630 }
631
632 return 0;
633}
634
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 *
 * Scrubs the hw table entry, removes the SA from the driver's software
 * tables (and the Rx SPI hash), drops the IP table reference, and stops
 * the offload engine when no SAs remain.
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		/* offload_handle was set to sa_idx + base by add_sa */
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		/* scrub the hw entry and drop it from the SPI hash */
		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}
700
701/**
702 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
703 * @skb: current data packet
704 * @xs: pointer to transformer state struct
705 **/
706static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
707{
708 if (xs->props.family == AF_INET) {
709 /* Offload with IPv4 options is not supported yet */
710 if (ip_hdr(skb)->ihl != 5)
711 return false;
712 } else {
713 /* Offload with IPv6 extension headers is not support yet */
714 if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
715 return false;
716 }
717
718 return true;
719}
720
/**
 * ixgbe_ipsec_free - called by xfrm garbage collections
 * @xs: pointer to transformer state struct
 *
 * We don't have any garbage to collect, so we shouldn't bother
 * implementing this function, but the XFRM code doesn't check for
 * existence before calling the API callback.
 **/
static void ixgbe_ipsec_free(struct xfrm_state *xs)
{
	/* intentionally empty - no per-state resources are allocated */
}
732
/* XFRM device offload callbacks registered with the netdev */
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
	.xdo_dev_state_free = ixgbe_ipsec_free,
};
739
740/**
741 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
742 * @tx_ring: outgoing context
743 * @first: current data packet
744 * @itd: ipsec Tx data for later use in building context descriptor
745 **/
746int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
747 struct ixgbe_tx_buffer *first,
748 struct ixgbe_ipsec_tx_data *itd)
749{
750 struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
751 struct ixgbe_ipsec *ipsec = adapter->ipsec;
752 struct xfrm_state *xs;
753 struct tx_sa *tsa;
754
755 if (unlikely(!first->skb->sp->len)) {
756 netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
757 __func__, first->skb->sp->len);
758 return 0;
759 }
760
761 xs = xfrm_input_state(first->skb);
762 if (unlikely(!xs)) {
763 netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
764 __func__, xs);
765 return 0;
766 }
767
768 itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
769 if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
770 netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
771 __func__, itd->sa_idx, xs->xso.offload_handle);
772 return 0;
773 }
774
775 tsa = &ipsec->tx_tbl[itd->sa_idx];
776 if (unlikely(!tsa->used)) {
777 netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
778 __func__, itd->sa_idx);
779 return 0;
780 }
781
782 first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
783
784 itd->flags = 0;
785 if (xs->id.proto == IPPROTO_ESP) {
786 itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
787 IXGBE_ADVTXD_TUCMD_L4T_TCP;
788 if (first->protocol == htons(ETH_P_IP))
789 itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
790 itd->trailer_len = xs->props.trailer_len;
791 }
792 if (tsa->encrypt)
793 itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
794
795 return 1;
796}
797
798/**
799 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
800 * @rx_ring: receiving ring
801 * @rx_desc: receive data descriptor
802 * @skb: current data packet
803 *
804 * Determine if there was an ipsec encapsulation noticed, and if so set up
805 * the resulting status for later in the receive stack.
806 **/
807void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
808 union ixgbe_adv_rx_desc *rx_desc,
809 struct sk_buff *skb)
810{
811 struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
812 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
813 __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
814 IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
815 struct ixgbe_ipsec *ipsec = adapter->ipsec;
816 struct xfrm_offload *xo = NULL;
817 struct xfrm_state *xs = NULL;
818 struct ipv6hdr *ip6 = NULL;
819 struct iphdr *ip4 = NULL;
820 void *daddr;
821 __be32 spi;
822 u8 *c_hdr;
823 u8 proto;
824
825 /* Find the ip and crypto headers in the data.
826 * We can assume no vlan header in the way, b/c the
827 * hw won't recognize the IPsec packet and anyway the
828 * currently vlan device doesn't support xfrm offload.
829 */
830 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
831 ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
832 daddr = &ip4->daddr;
833 c_hdr = (u8 *)ip4 + ip4->ihl * 4;
834 } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
835 ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
836 daddr = &ip6->daddr;
837 c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
838 } else {
839 return;
840 }
841
842 switch (pkt_info & ipsec_pkt_types) {
843 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
844 spi = ((struct ip_auth_hdr *)c_hdr)->spi;
845 proto = IPPROTO_AH;
846 break;
847 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
848 spi = ((struct ip_esp_hdr *)c_hdr)->spi;
849 proto = IPPROTO_ESP;
850 break;
851 default:
852 return;
853 }
854
855 xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
856 if (unlikely(!xs))
857 return;
858
859 skb->sp = secpath_dup(skb->sp);
860 if (unlikely(!skb->sp))
861 return;
862
863 skb->sp->xvec[skb->sp->len++] = xs;
864 skb->sp->olen++;
865 xo = xfrm_offload(skb);
866 xo->flags = CRYPTO_DONE;
867 xo->status = CRYPTO_SUCCESS;
868
869 adapter->rx_ipsec++;
870}
871
872/**
873 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
874 * @adapter: board private structure
875 **/
876void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
877{
878 struct ixgbe_ipsec *ipsec;
879 size_t size;
880
881 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
882 return;
883
884 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
885 if (!ipsec)
886 goto err1;
887 hash_init(ipsec->rx_sa_list);
888
889 size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
890 ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
891 if (!ipsec->rx_tbl)
892 goto err2;
893
894 size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
895 ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
896 if (!ipsec->tx_tbl)
897 goto err2;
898
899 size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
900 ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
901 if (!ipsec->ip_tbl)
902 goto err2;
903
904 ipsec->num_rx_sa = 0;
905 ipsec->num_tx_sa = 0;
906
907 adapter->ipsec = ipsec;
908 ixgbe_ipsec_stop_engine(adapter);
909 ixgbe_ipsec_clear_hw_tables(adapter);
910
911 adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
912 adapter->netdev->features |= NETIF_F_HW_ESP;
913 adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
914
915 return;
916
917err2:
918 kfree(ipsec->ip_tbl);
919 kfree(ipsec->rx_tbl);
920 kfree(ipsec->tx_tbl);
921err1:
922 kfree(adapter->ipsec);
923 netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
924}
925
926/**
927 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
928 * @adapter: board private structure
929 **/
930void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
931{
932 struct ixgbe_ipsec *ipsec = adapter->ipsec;
933
934 adapter->ipsec = NULL;
935 if (ipsec) {
936 kfree(ipsec->ip_tbl);
937 kfree(ipsec->rx_tbl);
938 kfree(ipsec->tx_tbl);
939 kfree(ipsec);
940 }
941}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
new file mode 100644
index 000000000000..da3ce7849e85
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -0,0 +1,93 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program. If not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
#ifndef _IXGBE_IPSEC_H_
#define _IXGBE_IPSEC_H_

/* table sizes: 1024 Rx SAs, 1024 Tx SAs, 128 Rx IP addresses */
#define IXGBE_IPSEC_MAX_SA_COUNT 1024
#define IXGBE_IPSEC_MAX_RX_IP_COUNT 128
/* offload_handle bases: Rx handles start at 0, Tx handles start after
 * the Rx range so the two can be told apart (see ixgbe_ipsec_tx, which
 * subtracts IXGBE_IPSEC_BASE_TX_INDEX to recover the tx_tbl index)
 */
#define IXGBE_IPSEC_BASE_RX_INDEX 0
#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT

/* bits written to the IPSRXIDX/IPSTXIDX-style index registers */
#define IXGBE_RXTXIDX_IPS_EN 0x00000001
#define IXGBE_RXIDX_TBL_SHIFT 1
/* selects which of the three Rx-side hardware tables an index access hits */
enum ixgbe_ipsec_tbl_sel {
	ips_rx_ip_tbl = 0x01,
	ips_rx_spi_tbl = 0x02,
	ips_rx_key_tbl = 0x03,
};

#define IXGBE_RXTXIDX_IDX_SHIFT 3
#define IXGBE_RXTXIDX_READ 0x40000000
#define IXGBE_RXTXIDX_WRITE 0x80000000

/* Rx SA mode bits (stored in struct rx_sa.mode) */
#define IXGBE_RXMOD_VALID 0x00000001
#define IXGBE_RXMOD_PROTO_ESP 0x00000004
#define IXGBE_RXMOD_DECRYPT 0x00000008
#define IXGBE_RXMOD_IPV6 0x00000010

/* software shadow of one inbound Security Association */
struct rx_sa {
	struct hlist_node hlist;	/* node in ixgbe_ipsec.rx_sa_list */
	struct xfrm_state *xs;		/* owning xfrm state */
	__be32 ipaddr[4];		/* dest address; IPv4 presumably uses one word - confirm */
	u32 key[4];			/* 128-bit key material for the hw table */
	u32 salt;
	u32 mode;			/* IXGBE_RXMOD_* bits */
	u8 iptbl_ind;			/* index into ip_tbl for this SA's address */
	bool used;
	bool decrypt;
};

/* one entry of the shared Rx IP-address table, refcounted across SAs */
struct rx_ip_sa {
	__be32 ipaddr[4];
	u32 ref_cnt;			/* number of rx_sa entries using this address */
	bool used;
};

/* software shadow of one outbound Security Association */
struct tx_sa {
	struct xfrm_state *xs;		/* owning xfrm state */
	u32 key[4];			/* 128-bit key material for the hw table */
	u32 salt;
	bool encrypt;			/* set ENCRYPT_EN in the Tx context descriptor */
	bool used;
};

/* per-packet Tx data handed from ixgbe_ipsec_tx to the descriptor builder */
struct ixgbe_ipsec_tx_data {
	u32 flags;			/* IXGBE_ADVTXD_TUCMD_* bits */
	u16 trailer_len;		/* ESP trailer length from xs->props */
	u16 sa_idx;			/* index into tx_tbl */
};

/* top-level ipsec offload state hung off struct ixgbe_adapter */
struct ixgbe_ipsec {
	u16 num_rx_sa;
	u16 num_tx_sa;
	struct rx_ip_sa *ip_tbl;	/* IXGBE_IPSEC_MAX_RX_IP_COUNT entries */
	struct rx_sa *rx_tbl;		/* IXGBE_IPSEC_MAX_SA_COUNT entries */
	struct tx_sa *tx_tbl;		/* IXGBE_IPSEC_MAX_SA_COUNT entries */
	DECLARE_HASHTABLE(rx_sa_list, 10);	/* Rx SAs hashed for lookup on receive */
};
#endif /* _IXGBE_IPSEC_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index b3c282d09b18..4242f0213e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1282,7 +1282,7 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
1282} 1282}
1283 1283
1284void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, 1284void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1285 u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) 1285 u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
1286{ 1286{
1287 struct ixgbe_adv_tx_context_desc *context_desc; 1287 struct ixgbe_adv_tx_context_desc *context_desc;
1288 u16 i = tx_ring->next_to_use; 1288 u16 i = tx_ring->next_to_use;
@@ -1296,7 +1296,7 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1296 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 1296 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1297 1297
1298 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 1298 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1299 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); 1299 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
1300 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 1300 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1301 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1301 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1302} 1302}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4f28621b76e1..722cc3153a99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1171,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1171 struct ixgbe_adapter *adapter = q_vector->adapter; 1171 struct ixgbe_adapter *adapter = q_vector->adapter;
1172 struct ixgbe_tx_buffer *tx_buffer; 1172 struct ixgbe_tx_buffer *tx_buffer;
1173 union ixgbe_adv_tx_desc *tx_desc; 1173 union ixgbe_adv_tx_desc *tx_desc;
1174 unsigned int total_bytes = 0, total_packets = 0; 1174 unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
1175 unsigned int budget = q_vector->tx.work_limit; 1175 unsigned int budget = q_vector->tx.work_limit;
1176 unsigned int i = tx_ring->next_to_clean; 1176 unsigned int i = tx_ring->next_to_clean;
1177 1177
@@ -1202,6 +1202,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1202 /* update the statistics for this packet */ 1202 /* update the statistics for this packet */
1203 total_bytes += tx_buffer->bytecount; 1203 total_bytes += tx_buffer->bytecount;
1204 total_packets += tx_buffer->gso_segs; 1204 total_packets += tx_buffer->gso_segs;
1205 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
1206 total_ipsec++;
1205 1207
1206 /* free the skb */ 1208 /* free the skb */
1207 if (ring_is_xdp(tx_ring)) 1209 if (ring_is_xdp(tx_ring))
@@ -1264,6 +1266,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
1264 u64_stats_update_end(&tx_ring->syncp); 1266 u64_stats_update_end(&tx_ring->syncp);
1265 q_vector->tx.total_bytes += total_bytes; 1267 q_vector->tx.total_bytes += total_bytes;
1266 q_vector->tx.total_packets += total_packets; 1268 q_vector->tx.total_packets += total_packets;
1269 adapter->tx_ipsec += total_ipsec;
1267 1270
1268 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { 1271 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
1269 /* schedule immediate reset if we believe we hung */ 1272 /* schedule immediate reset if we believe we hung */
@@ -1752,6 +1755,9 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1752 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); 1755 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1753 } 1756 }
1754 1757
1758 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
1759 ixgbe_ipsec_rx(rx_ring, rx_desc, skb);
1760
1755 skb->protocol = eth_type_trans(skb, dev); 1761 skb->protocol = eth_type_trans(skb, dev);
1756 1762
1757 /* record Rx queue, or update MACVLAN statistics */ 1763 /* record Rx queue, or update MACVLAN statistics */
@@ -5425,6 +5431,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
5425 5431
5426 ixgbe_set_rx_mode(adapter->netdev); 5432 ixgbe_set_rx_mode(adapter->netdev);
5427 ixgbe_restore_vlan(adapter); 5433 ixgbe_restore_vlan(adapter);
5434 ixgbe_ipsec_restore(adapter);
5428 5435
5429 switch (hw->mac.type) { 5436 switch (hw->mac.type) {
5430 case ixgbe_mac_82599EB: 5437 case ixgbe_mac_82599EB:
@@ -7795,10 +7802,12 @@ static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
7795} 7802}
7796 7803
7797static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, 7804static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
7798 struct ixgbe_tx_buffer *first) 7805 struct ixgbe_tx_buffer *first,
7806 struct ixgbe_ipsec_tx_data *itd)
7799{ 7807{
7800 struct sk_buff *skb = first->skb; 7808 struct sk_buff *skb = first->skb;
7801 u32 vlan_macip_lens = 0; 7809 u32 vlan_macip_lens = 0;
7810 u32 fceof_saidx = 0;
7802 u32 type_tucmd = 0; 7811 u32 type_tucmd = 0;
7803 7812
7804 if (skb->ip_summed != CHECKSUM_PARTIAL) { 7813 if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -7839,7 +7848,12 @@ no_csum:
7839 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 7848 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7840 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 7849 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
7841 7850
7842 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); 7851 if (first->tx_flags & IXGBE_TX_FLAGS_IPSEC) {
7852 fceof_saidx |= itd->sa_idx;
7853 type_tucmd |= itd->flags | itd->trailer_len;
7854 }
7855
7856 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, 0);
7843} 7857}
7844 7858
7845#define IXGBE_SET_FLAG(_input, _flag, _result) \ 7859#define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7882,11 +7896,16 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
7882 IXGBE_TX_FLAGS_CSUM, 7896 IXGBE_TX_FLAGS_CSUM,
7883 IXGBE_ADVTXD_POPTS_TXSM); 7897 IXGBE_ADVTXD_POPTS_TXSM);
7884 7898
7885 /* enble IPv4 checksum for TSO */ 7899 /* enable IPv4 checksum for TSO */
7886 olinfo_status |= IXGBE_SET_FLAG(tx_flags, 7900 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7887 IXGBE_TX_FLAGS_IPV4, 7901 IXGBE_TX_FLAGS_IPV4,
7888 IXGBE_ADVTXD_POPTS_IXSM); 7902 IXGBE_ADVTXD_POPTS_IXSM);
7889 7903
7904 /* enable IPsec */
7905 olinfo_status |= IXGBE_SET_FLAG(tx_flags,
7906 IXGBE_TX_FLAGS_IPSEC,
7907 IXGBE_ADVTXD_POPTS_IPSEC);
7908
7890 /* 7909 /*
7891 * Check Context must be set if Tx switch is enabled, which it 7910 * Check Context must be set if Tx switch is enabled, which it
7892 * always is for case where virtual functions are running 7911 * always is for case where virtual functions are running
@@ -8350,6 +8369,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8350 u32 tx_flags = 0; 8369 u32 tx_flags = 0;
8351 unsigned short f; 8370 unsigned short f;
8352 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 8371 u16 count = TXD_USE_COUNT(skb_headlen(skb));
8372 struct ixgbe_ipsec_tx_data ipsec_tx = { 0 };
8353 __be16 protocol = skb->protocol; 8373 __be16 protocol = skb->protocol;
8354 u8 hdr_len = 0; 8374 u8 hdr_len = 0;
8355 8375
@@ -8454,11 +8474,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8454 } 8474 }
8455 8475
8456#endif /* IXGBE_FCOE */ 8476#endif /* IXGBE_FCOE */
8477
8478#ifdef CONFIG_XFRM_OFFLOAD
8479 if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8480 goto out_drop;
8481#endif
8457 tso = ixgbe_tso(tx_ring, first, &hdr_len); 8482 tso = ixgbe_tso(tx_ring, first, &hdr_len);
8458 if (tso < 0) 8483 if (tso < 0)
8459 goto out_drop; 8484 goto out_drop;
8460 else if (!tso) 8485 else if (!tso)
8461 ixgbe_tx_csum(tx_ring, first); 8486 ixgbe_tx_csum(tx_ring, first, &ipsec_tx);
8462 8487
8463 /* add the ATR filter if ATR is on */ 8488 /* add the ATR filter if ATR is on */
8464 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) 8489 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
@@ -9870,6 +9895,12 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
9870 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) 9895 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
9871 features &= ~NETIF_F_TSO; 9896 features &= ~NETIF_F_TSO;
9872 9897
9898#ifdef CONFIG_XFRM_OFFLOAD
9899 /* IPsec offload doesn't get along well with others *yet* */
9900 if (skb->sp)
9901 features &= ~(NETIF_F_TSO | NETIF_F_HW_CSUM);
9902#endif
9903
9873 return features; 9904 return features;
9874} 9905}
9875 9906
@@ -10459,6 +10490,7 @@ skip_sriov:
10459 NETIF_F_FCOE_MTU; 10490 NETIF_F_FCOE_MTU;
10460 } 10491 }
10461#endif /* IXGBE_FCOE */ 10492#endif /* IXGBE_FCOE */
10493 ixgbe_init_ipsec_offload(adapter);
10462 10494
10463 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 10495 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
10464 netdev->hw_features |= NETIF_F_LRO; 10496 netdev->hw_features |= NETIF_F_LRO;
@@ -10694,6 +10726,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
10694 if (netdev->reg_state == NETREG_REGISTERED) 10726 if (netdev->reg_state == NETREG_REGISTERED)
10695 unregister_netdev(netdev); 10727 unregister_netdev(netdev);
10696 10728
10729 ixgbe_stop_ipsec_offload(adapter);
10697 ixgbe_clear_interrupt_scheme(adapter); 10730 ixgbe_clear_interrupt_scheme(adapter);
10698 10731
10699 ixgbe_release_hw_control(adapter); 10732 ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 21eb79ae3c30..ca45359686d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2360,11 +2360,6 @@ enum {
2360#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ 2360#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
2361#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ 2361#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
2362 2362
2363#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000
2364#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
2365#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
2366#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000
2367#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
2368/* Multiple Transmit Queue Command Register */ 2363/* Multiple Transmit Queue Command Register */
2369#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ 2364#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
2370#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ 2365#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
@@ -2416,6 +2411,9 @@ enum {
2416#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ 2411#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
2417#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ 2412#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
2418#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ 2413#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
2414#define IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 /* overlap ERR_PE */
2415#define IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 /* overlap ERR_OSE */
2416#define IXGBE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000
2419#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ 2417#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
2420#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ 2418#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
2421#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ 2419#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
@@ -2437,6 +2435,7 @@ enum {
2437#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ 2435#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
2438#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ 2436#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
2439#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ 2437#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */
2438#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
2440 2439
2441/* PSRTYPE bit definitions */ 2440/* PSRTYPE bit definitions */
2442#define IXGBE_PSRTYPE_TCPHDR 0x00000010 2441#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -2503,13 +2502,6 @@ enum {
2503#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ 2502#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
2504#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ 2503#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
2505 2504
2506/* Security Processing bit Indication */
2507#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000
2508#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
2509#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
2510#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
2511#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
2512
2513/* Masks to determine if packets should be dropped due to frame errors */ 2505/* Masks to determine if packets should be dropped due to frame errors */
2514#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 2506#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
2515 IXGBE_RXD_ERR_CE | \ 2507 IXGBE_RXD_ERR_CE | \
@@ -2523,6 +2515,8 @@ enum {
2523 IXGBE_RXDADV_ERR_LE | \ 2515 IXGBE_RXDADV_ERR_LE | \
2524 IXGBE_RXDADV_ERR_PE | \ 2516 IXGBE_RXDADV_ERR_PE | \
2525 IXGBE_RXDADV_ERR_OSE | \ 2517 IXGBE_RXDADV_ERR_OSE | \
2518 IXGBE_RXDADV_ERR_IPSEC_INV_PROTOCOL | \
2519 IXGBE_RXDADV_ERR_IPSEC_INV_LENGTH | \
2526 IXGBE_RXDADV_ERR_USE) 2520 IXGBE_RXDADV_ERR_USE)
2527 2521
2528/* Multicast bit mask */ 2522/* Multicast bit mask */
@@ -2901,7 +2895,7 @@ union ixgbe_adv_rx_desc {
2901/* Context descriptors */ 2895/* Context descriptors */
2902struct ixgbe_adv_tx_context_desc { 2896struct ixgbe_adv_tx_context_desc {
2903 __le32 vlan_macip_lens; 2897 __le32 vlan_macip_lens;
2904 __le32 seqnum_seed; 2898 __le32 fceof_saidx;
2905 __le32 type_tucmd_mlhl; 2899 __le32 type_tucmd_mlhl;
2906 __le32 mss_l4len_idx; 2900 __le32 mss_l4len_idx;
2907}; 2901};
@@ -2932,6 +2926,7 @@ struct ixgbe_adv_tx_context_desc {
2932 IXGBE_ADVTXD_POPTS_SHIFT) 2926 IXGBE_ADVTXD_POPTS_SHIFT)
2933#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 2927#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
2934 IXGBE_ADVTXD_POPTS_SHIFT) 2928 IXGBE_ADVTXD_POPTS_SHIFT)
2929#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
2935#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 2930#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
2936#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 2931#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
2937#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 2932#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2947,7 +2942,6 @@ struct ixgbe_adv_tx_context_desc {
2947#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ 2942#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
2948#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ 2943#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
2949#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ 2944#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
2950#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
2951#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ 2945#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
2952#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ 2946#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
2953#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ 2947#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */