author     Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
commit     92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch)
tree       15626ff9287e37c3cb81c7286d6db5a7fd77c854 /include/net
parent     15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff)
parent     1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff)
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2

Backmerge to get at

  commit 1b0e3a049efe471c399674fd954500ce97438d30
  Author: Imre Deak <imre.deak@intel.com>
  Date:   Thu Nov 5 23:04:11 2015 +0200

      drm/i915/skl: disable display side power well support for now

so that we can properly re-enable skl power wells in -next.

Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/6lowpan.h  360
-rw-r--r--  include/net/addrconf.h  3
-rw-r--r--  include/net/af_ieee802154.h  2
-rw-r--r--  include/net/af_unix.h  6
-rw-r--r--  include/net/af_vsock.h  3
-rw-r--r--  include/net/bluetooth/bluetooth.h  30
-rw-r--r--  include/net/bluetooth/hci.h  14
-rw-r--r--  include/net/bluetooth/hci_core.h  37
-rw-r--r--  include/net/bluetooth/hci_mon.h  10
-rw-r--r--  include/net/bluetooth/l2cap.h  2
-rw-r--r--  include/net/bond_3ad.h  3
-rw-r--r--  include/net/cfg80211.h  142
-rw-r--r--  include/net/cfg802154.h  164
-rw-r--r--  include/net/dn_neigh.h  6
-rw-r--r--  include/net/dsa.h  31
-rw-r--r--  include/net/dst.h  23
-rw-r--r--  include/net/dst_metadata.h  33
-rw-r--r--  include/net/dst_ops.h  3
-rw-r--r--  include/net/ethoc.h  1
-rw-r--r--  include/net/flow.h  2
-rw-r--r--  include/net/genetlink.h  5
-rw-r--r--  include/net/ieee802154_netdev.h  86
-rw-r--r--  include/net/inet6_connection_sock.h  13
-rw-r--r--  include/net/inet_common.h  3
-rw-r--r--  include/net/inet_connection_sock.h  35
-rw-r--r--  include/net/inet_frag.h  15
-rw-r--r--  include/net/inet_hashtables.h  5
-rw-r--r--  include/net/inet_sock.h  15
-rw-r--r--  include/net/inet_timewait_sock.h  6
-rw-r--r--  include/net/ip.h  41
-rw-r--r--  include/net/ip6_fib.h  3
-rw-r--r--  include/net/ip6_route.h  4
-rw-r--r--  include/net/ip6_tunnel.h  5
-rw-r--r--  include/net/ip_fib.h  18
-rw-r--r--  include/net/ip_tunnels.h  3
-rw-r--r--  include/net/ip_vs.h  299
-rw-r--r--  include/net/ipv6.h  13
-rw-r--r--  include/net/iucv/iucv.h  20
-rw-r--r--  include/net/l3mdev.h  222
-rw-r--r--  include/net/lwtunnel.h  8
-rw-r--r--  include/net/mac80211.h  49
-rw-r--r--  include/net/mac802154.h  33
-rw-r--r--  include/net/mpls_iptunnel.h  2
-rw-r--r--  include/net/ndisc.h  6
-rw-r--r--  include/net/netfilter/br_netfilter.h  8
-rw-r--r--  include/net/netfilter/ipv4/nf_dup_ipv4.h  2
-rw-r--r--  include/net/netfilter/ipv4/nf_reject.h  2
-rw-r--r--  include/net/netfilter/ipv6/nf_defrag_ipv6.h  2
-rw-r--r--  include/net/netfilter/ipv6/nf_dup_ipv6.h  2
-rw-r--r--  include/net/netfilter/nf_conntrack.h  7
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h  1
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h  2
-rw-r--r--  include/net/netfilter/nf_conntrack_timeout.h  25
-rw-r--r--  include/net/netfilter/nf_nat_core.h  2
-rw-r--r--  include/net/netfilter/nf_nat_l3proto.h  32
-rw-r--r--  include/net/netfilter/nf_queue.h  2
-rw-r--r--  include/net/netfilter/nf_tables.h  30
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h  3
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h  3
-rw-r--r--  include/net/netfilter/nfnetlink_queue.h  51
-rw-r--r--  include/net/netlink.h  18
-rw-r--r--  include/net/nfc/nci.h  8
-rw-r--r--  include/net/nfc/nci_core.h  24
-rw-r--r--  include/net/nfc/nfc.h  2
-rw-r--r--  include/net/nl802154.h  191
-rw-r--r--  include/net/request_sock.h  186
-rw-r--r--  include/net/route.h  23
-rw-r--r--  include/net/rtnetlink.h  6
-rw-r--r--  include/net/sch_generic.h  3
-rw-r--r--  include/net/sock.h  122
-rw-r--r--  include/net/switchdev.h  163
-rw-r--r--  include/net/tc_act/tc_connmark.h  1
-rw-r--r--  include/net/tcp.h  94
-rw-r--r--  include/net/tso.h  1
-rw-r--r--  include/net/vrf.h  178
-rw-r--r--  include/net/vxlan.h  14
-rw-r--r--  include/net/xfrm.h  8
77 files changed, 1759 insertions, 1241 deletions
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index a2f59ec98d24..cf3bc564ac03 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -56,146 +56,37 @@
56#include <net/ipv6.h> 56#include <net/ipv6.h>
57#include <net/net_namespace.h> 57#include <net/net_namespace.h>
58 58
59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ 59#define EUI64_ADDR_LEN 8
60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
61#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
62#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
63 60
64/* 61#define LOWPAN_NHC_MAX_ID_LEN 1
65 * ipv6 address based on mac 62/* Maximum next header compression length which we currently support inclusive
66 * second bit-flip (Universe/Local) is done according RFC2464 63 * possible inline data.
67 */ 64 */
68#define is_addr_mac_addr_based(a, m) \ 65#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr))
69 ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \ 66/* Max IPHC Header len without IPv6 hdr specific inline data.
70 (((a)->s6_addr[9]) == (m)[1]) && \ 67 * Useful for getting the "extra" bytes we need at worst case compression.
71 (((a)->s6_addr[10]) == (m)[2]) && \ 68 *
72 (((a)->s6_addr[11]) == (m)[3]) && \ 69 * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
73 (((a)->s6_addr[12]) == (m)[4]) && \
74 (((a)->s6_addr[13]) == (m)[5]) && \
75 (((a)->s6_addr[14]) == (m)[6]) && \
76 (((a)->s6_addr[15]) == (m)[7]))
77
78/*
79 * check whether we can compress the IID to 16 bits,
80 * it's possible for unicast adresses with first 49 bits are zero only.
81 */
82#define lowpan_is_iid_16_bit_compressable(a) \
83 ((((a)->s6_addr16[4]) == 0) && \
84 (((a)->s6_addr[10]) == 0) && \
85 (((a)->s6_addr[11]) == 0xff) && \
86 (((a)->s6_addr[12]) == 0xfe) && \
87 (((a)->s6_addr[13]) == 0))
88
89/* check whether the 112-bit gid of the multicast address is mappable to: */
90
91/* 48 bits, FFXX::00XX:XXXX:XXXX */
92#define lowpan_is_mcast_addr_compressable48(a) \
93 ((((a)->s6_addr16[1]) == 0) && \
94 (((a)->s6_addr16[2]) == 0) && \
95 (((a)->s6_addr16[3]) == 0) && \
96 (((a)->s6_addr16[4]) == 0) && \
97 (((a)->s6_addr[10]) == 0))
98
99/* 32 bits, FFXX::00XX:XXXX */
100#define lowpan_is_mcast_addr_compressable32(a) \
101 ((((a)->s6_addr16[1]) == 0) && \
102 (((a)->s6_addr16[2]) == 0) && \
103 (((a)->s6_addr16[3]) == 0) && \
104 (((a)->s6_addr16[4]) == 0) && \
105 (((a)->s6_addr16[5]) == 0) && \
106 (((a)->s6_addr[12]) == 0))
107
108/* 8 bits, FF02::00XX */
109#define lowpan_is_mcast_addr_compressable8(a) \
110 ((((a)->s6_addr[1]) == 2) && \
111 (((a)->s6_addr16[1]) == 0) && \
112 (((a)->s6_addr16[2]) == 0) && \
113 (((a)->s6_addr16[3]) == 0) && \
114 (((a)->s6_addr16[4]) == 0) && \
115 (((a)->s6_addr16[5]) == 0) && \
116 (((a)->s6_addr16[6]) == 0) && \
117 (((a)->s6_addr[14]) == 0))
118
119#define lowpan_is_addr_broadcast(a) \
120 ((((a)[0]) == 0xFF) && \
121 (((a)[1]) == 0xFF) && \
122 (((a)[2]) == 0xFF) && \
123 (((a)[3]) == 0xFF) && \
124 (((a)[4]) == 0xFF) && \
125 (((a)[5]) == 0xFF) && \
126 (((a)[6]) == 0xFF) && \
127 (((a)[7]) == 0xFF))
128
129#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
130#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
131#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
132#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
133#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
134
135#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */
136
137#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
138
139#define LOWPAN_FRAG1_HEAD_SIZE 0x4
140#define LOWPAN_FRAGN_HEAD_SIZE 0x5
141
142/*
143 * Values of fields within the IPHC encoding first byte
144 * (C stands for compressed and I for inline)
145 */ 70 */
146#define LOWPAN_IPHC_TF 0x18 71#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
147 72/* Maximum worst case IPHC header buffer size */
148#define LOWPAN_IPHC_FL_C 0x10 73#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
149#define LOWPAN_IPHC_TC_C 0x08 74 LOWPAN_IPHC_MAX_HEADER_LEN + \
150#define LOWPAN_IPHC_NH_C 0x04 75 LOWPAN_NHC_MAX_HDR_LEN)
151#define LOWPAN_IPHC_TTL_1 0x01
152#define LOWPAN_IPHC_TTL_64 0x02
153#define LOWPAN_IPHC_TTL_255 0x03
154#define LOWPAN_IPHC_TTL_I 0x00
155
156 76
157/* Values of fields within the IPHC encoding second byte */ 77#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
158#define LOWPAN_IPHC_CID 0x80 78#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
79#define LOWPAN_DISPATCH_IPHC_MASK 0xe0
159 80
160#define LOWPAN_IPHC_ADDR_00 0x00 81static inline bool lowpan_is_ipv6(u8 dispatch)
161#define LOWPAN_IPHC_ADDR_01 0x01 82{
162#define LOWPAN_IPHC_ADDR_02 0x02 83 return dispatch == LOWPAN_DISPATCH_IPV6;
163#define LOWPAN_IPHC_ADDR_03 0x03 84}
164
165#define LOWPAN_IPHC_SAC 0x40
166#define LOWPAN_IPHC_SAM 0x30
167
168#define LOWPAN_IPHC_SAM_BIT 4
169
170#define LOWPAN_IPHC_M 0x08
171#define LOWPAN_IPHC_DAC 0x04
172#define LOWPAN_IPHC_DAM_00 0x00
173#define LOWPAN_IPHC_DAM_01 0x01
174#define LOWPAN_IPHC_DAM_10 0x02
175#define LOWPAN_IPHC_DAM_11 0x03
176
177#define LOWPAN_IPHC_DAM_BIT 0
178/*
179 * LOWPAN_UDP encoding (works together with IPHC)
180 */
181#define LOWPAN_NHC_UDP_MASK 0xF8
182#define LOWPAN_NHC_UDP_ID 0xF0
183#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
184#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
185
186#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
187#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
188#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
189#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
190 85
191/* values for port compression, _with checksum_ ie bit 5 set to 0 */ 86static inline bool lowpan_is_iphc(u8 dispatch)
192#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */ 87{
193#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline, 88 return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
194 dest = 0xF0 + 8 bit inline */ 89}
195#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
196 dest = 16 bit inline */
197#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
198#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
199 90
200#define LOWPAN_PRIV_SIZE(llpriv_size) \ 91#define LOWPAN_PRIV_SIZE(llpriv_size) \
201 (sizeof(struct lowpan_priv) + llpriv_size) 92 (sizeof(struct lowpan_priv) + llpriv_size)
@@ -218,10 +109,23 @@ struct lowpan_priv *lowpan_priv(const struct net_device *dev)
218 return netdev_priv(dev); 109 return netdev_priv(dev);
219} 110}
220 111
112struct lowpan_802154_cb {
113 u16 d_tag;
114 unsigned int d_size;
115 u8 d_offset;
116};
117
118static inline
119struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
120{
121 BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
122 return (struct lowpan_802154_cb *)skb->cb;
123}
124
221#ifdef DEBUG 125#ifdef DEBUG
222/* print data in line */ 126/* print data in line */
223static inline void raw_dump_inline(const char *caller, char *msg, 127static inline void raw_dump_inline(const char *caller, char *msg,
224 unsigned char *buf, int len) 128 const unsigned char *buf, int len)
225{ 129{
226 if (msg) 130 if (msg)
227 pr_debug("%s():%s: ", caller, msg); 131 pr_debug("%s():%s: ", caller, msg);
@@ -236,7 +140,7 @@ static inline void raw_dump_inline(const char *caller, char *msg,
236 * ... 140 * ...
237 */ 141 */
238static inline void raw_dump_table(const char *caller, char *msg, 142static inline void raw_dump_table(const char *caller, char *msg,
239 unsigned char *buf, int len) 143 const unsigned char *buf, int len)
240{ 144{
241 if (msg) 145 if (msg)
242 pr_debug("%s():%s:\n", caller, msg); 146 pr_debug("%s():%s:\n", caller, msg);
@@ -245,24 +149,25 @@ static inline void raw_dump_table(const char *caller, char *msg,
245} 149}
246#else 150#else
247static inline void raw_dump_table(const char *caller, char *msg, 151static inline void raw_dump_table(const char *caller, char *msg,
248 unsigned char *buf, int len) { } 152 const unsigned char *buf, int len) { }
249static inline void raw_dump_inline(const char *caller, char *msg, 153static inline void raw_dump_inline(const char *caller, char *msg,
250 unsigned char *buf, int len) { } 154 const unsigned char *buf, int len) { }
251#endif 155#endif
252 156
253static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) 157/**
254{ 158 * lowpan_fetch_skb - getting inline data from 6LoWPAN header
255 if (unlikely(!pskb_may_pull(skb, 1))) 159 *
256 return -EINVAL; 160 * This function will pull data from sk buffer and put it into data to
257 161 * remove the 6LoWPAN inline data. This function returns true if the
258 *val = skb->data[0]; 162 * sk buffer is too small to pull the amount of data which is specified
259 skb_pull(skb, 1); 163 * by len.
260 164 *
261 return 0; 165 * @skb: the buffer where the inline data should be pulled from.
262} 166 * @data: destination buffer for the inline data.
263 167 * @len: amount of data which should be pulled in bytes.
264static inline bool lowpan_fetch_skb(struct sk_buff *skb, 168 */
265 void *data, const unsigned int len) 169static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
170 unsigned int len)
266{ 171{
267 if (unlikely(!pskb_may_pull(skb, len))) 172 if (unlikely(!pskb_may_pull(skb, len)))
268 return true; 173 return true;
@@ -280,129 +185,44 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
280 *hc_ptr += len; 185 *hc_ptr += len;
281} 186}
282 187
283static inline u8 lowpan_addr_mode_size(const u8 addr_mode) 188void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
284{
285 static const u8 addr_sizes[] = {
286 [LOWPAN_IPHC_ADDR_00] = 16,
287 [LOWPAN_IPHC_ADDR_01] = 8,
288 [LOWPAN_IPHC_ADDR_02] = 2,
289 [LOWPAN_IPHC_ADDR_03] = 0,
290 };
291 return addr_sizes[addr_mode];
292}
293
294static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
295{
296 u8 ret = 1;
297
298 if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
299 *uncomp_header += sizeof(struct udphdr);
300
301 switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
302 case LOWPAN_NHC_UDP_CS_P_00:
303 ret += 4;
304 break;
305 case LOWPAN_NHC_UDP_CS_P_01:
306 case LOWPAN_NHC_UDP_CS_P_10:
307 ret += 3;
308 break;
309 case LOWPAN_NHC_UDP_CS_P_11:
310 ret++;
311 break;
312 default:
313 break;
314 }
315
316 if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
317 ret += 2;
318 }
319
320 return ret;
321}
322 189
323/** 190/**
324 * lowpan_uncompress_size - returns skb->len size with uncompressed header 191 * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
325 * @skb: sk_buff with 6lowpan header inside
326 * @datagram_offset: optional to get the datagram_offset value
327 * 192 *
328 * Returns the skb->len with uncompressed header 193 * This function replaces the IPHC 6LoWPAN header which should be pointed at
194 * skb->data and skb_network_header, with the IPv6 header.
195 * It would be nice that the caller have the necessary headroom of IPv6 header
196 * and greatest Transport layer header, this would reduce the overhead for
197 * reallocate headroom.
198 *
199 * @skb: the buffer which should be manipulate.
200 * @dev: the lowpan net device pointer.
201 * @daddr: destination lladdr of mac header which is used for compression
202 * methods.
203 * @saddr: source lladdr of mac header which is used for compression
204 * methods.
329 */ 205 */
330static inline u16 206int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
331lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) 207 const void *daddr, const void *saddr);
332{
333 u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
334 u8 iphc0, iphc1, h_enc;
335
336 iphc0 = skb_network_header(skb)[0];
337 iphc1 = skb_network_header(skb)[1];
338
339 switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
340 case 0:
341 ret += 4;
342 break;
343 case 1:
344 ret += 3;
345 break;
346 case 2:
347 ret++;
348 break;
349 default:
350 break;
351 }
352
353 if (!(iphc0 & LOWPAN_IPHC_NH_C))
354 ret++;
355
356 if (!(iphc0 & 0x03))
357 ret++;
358
359 ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
360 LOWPAN_IPHC_SAM_BIT);
361
362 if (iphc1 & LOWPAN_IPHC_M) {
363 switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
364 LOWPAN_IPHC_DAM_BIT) {
365 case LOWPAN_IPHC_DAM_00:
366 ret += 16;
367 break;
368 case LOWPAN_IPHC_DAM_01:
369 ret += 6;
370 break;
371 case LOWPAN_IPHC_DAM_10:
372 ret += 4;
373 break;
374 case LOWPAN_IPHC_DAM_11:
375 ret++;
376 break;
377 default:
378 break;
379 }
380 } else {
381 ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
382 LOWPAN_IPHC_DAM_BIT);
383 }
384
385 if (iphc0 & LOWPAN_IPHC_NH_C) {
386 h_enc = skb_network_header(skb)[ret];
387 ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
388 }
389 208
390 if (dgram_offset) 209/**
391 *dgram_offset = uncomp_header; 210 * lowpan_header_compress - replace IPv6 header with 6LoWPAN header
392 211 *
393 return skb->len + uncomp_header - ret; 212 * This function replaces the IPv6 header which should be pointed at
394} 213 * skb->data and skb_network_header, with the IPHC 6LoWPAN header.
395 214 * The caller need to be sure that the sk buffer is not shared and at have
396void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype); 215 * at least a headroom which is smaller or equal LOWPAN_IPHC_MAX_HEADER_LEN,
397 216 * which is the IPHC "more bytes than IPv6 header" at worst case.
398int 217 *
399lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, 218 * @skb: the buffer which should be manipulate.
400 const u8 *saddr, const u8 saddr_type, 219 * @dev: the lowpan net device pointer.
401 const u8 saddr_len, const u8 *daddr, 220 * @daddr: destination lladdr of mac header which is used for compression
402 const u8 daddr_type, const u8 daddr_len, 221 * methods.
403 u8 iphc0, u8 iphc1); 222 * @saddr: source lladdr of mac header which is used for compression
404int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, 223 * methods.
405 unsigned short type, const void *_daddr, 224 */
406 const void *_saddr, unsigned int len); 225int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
226 const void *daddr, const void *saddr);
407 227
408#endif /* __6LOWPAN_H__ */ 228#endif /* __6LOWPAN_H__ */
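The 6LoWPAN rework above trims the public API to lowpan_header_compress()/lowpan_header_decompress() taking only (skb, dev, daddr, saddr), and adds the lowpan_is_ipv6()/lowpan_is_iphc() dispatch helpers plus lowpan_fetch_skb(), which returns true when the skb is too short. A minimal, hypothetical receive-path caller might look like the sketch below; the function name and return codes are illustrative and not part of the patch.

/*
 * Hypothetical receive-path sketch (not part of the patch): dispatch on the
 * first 6LoWPAN byte and decompress IPHC frames with the new four-argument
 * lowpan_header_decompress().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/6lowpan.h>

static int lowpan_rx_sketch(struct sk_buff *skb, struct net_device *dev,
			    const void *daddr, const void *saddr)
{
	if (!pskb_may_pull(skb, 1))
		return -EINVAL;

	if (lowpan_is_ipv6(skb->data[0])) {
		/* Uncompressed IPv6: strip the dispatch byte, payload follows. */
		skb_pull(skb, 1);
		return 0;
	}

	if (lowpan_is_iphc(skb->data[0]))
		/* IPHC keeps the dispatch bits inside the header itself, so
		 * skb->data already points where the kerneldoc above wants it.
		 */
		return lowpan_header_decompress(skb, dev, daddr, saddr);

	return -EOPNOTSUPP;
}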
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b5474b1fcd83..78003dfb8539 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -192,8 +192,7 @@ struct ipv6_stub {
 	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6);
 	void (*udpv6_encap_enable)(void);
-	void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
-			      const struct in6_addr *daddr,
+	void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
 			      const struct in6_addr *solicited_addr,
 			      bool router, bool solicited, bool override, bool inc_opt);
 	struct neigh_table *nd_tbl;
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index 7d38e2ffd256..a5563d27a3eb 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -1,5 +1,5 @@
 /*
- * IEEE 802.15.4 inteface for userspace
+ * IEEE 802.15.4 interface for userspace
  *
  * Copyright 2007, 2008 Siemens AG
  *
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 4a167b30a12f..b36d837c701e 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,7 +63,11 @@ struct unix_sock {
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(const struct sock *sk)
+{
+	return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index db639a4c5ab8..e9eb2d6791b3 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -22,6 +22,9 @@
 
 #include "vsock_addr.h"
 
+/* vsock-specific sock->sk_state constants */
+#define VSOCK_SS_LISTEN 255
+
 #define LAST_RESERVED_PORT 1023
 
 #define vsock_sk(__sk) ((struct vsock_sock *)__sk)
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 38d8a34d3589..42844d7b154a 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -122,12 +122,28 @@ struct bt_voice {
 __printf(1, 2)
 void bt_info(const char *fmt, ...);
 __printf(1, 2)
+void bt_warn(const char *fmt, ...);
+__printf(1, 2)
 void bt_err(const char *fmt, ...);
+__printf(1, 2)
+void bt_err_ratelimited(const char *fmt, ...);
 
 #define BT_INFO(fmt, ...)	bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_WARN(fmt, ...)	bt_warn(fmt "\n", ##__VA_ARGS__)
 #define BT_ERR(fmt, ...)	bt_err(fmt "\n", ##__VA_ARGS__)
 #define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
 
+#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
+
+#define bt_dev_info(hdev, fmt, ...)				\
+	BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn(hdev, fmt, ...)				\
+	BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_err(hdev, fmt, ...)				\
+	BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_dbg(hdev, fmt, ...)				\
+	BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+
 /* Connection and socket states */
 enum {
 	BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
@@ -280,22 +296,22 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
 typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
 					u16 opcode, struct sk_buff *skb);
 
-struct req_ctrl {
-	bool start;
-	u8 event;
-	hci_req_complete_t complete;
-	hci_req_complete_skb_t complete_skb;
+struct hci_ctrl {
+	__u16 opcode;
+	bool req_start;
+	u8 req_event;
+	hci_req_complete_t req_complete;
+	hci_req_complete_skb_t req_complete_skb;
 };
 
 struct bt_skb_cb {
 	__u8 pkt_type;
 	__u8 force_active;
-	__u16 opcode;
 	__u16 expect;
 	__u8 incoming:1;
 	union {
 		struct l2cap_ctrl l2cap;
-		struct req_ctrl req;
+		struct hci_ctrl hci;
 	};
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 7ca6690355ea..0205b80cc90b 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -44,6 +44,9 @@
 #define HCI_DEV_DOWN			4
 #define HCI_DEV_SUSPEND			5
 #define HCI_DEV_RESUME			6
+#define HCI_DEV_OPEN			7
+#define HCI_DEV_CLOSE			8
+#define HCI_DEV_SETUP			9
 
 /* HCI notify events */
 #define HCI_NOTIFY_CONN_ADD		1
@@ -168,6 +171,15 @@ enum {
 	 * during the hdev->setup vendor callback.
 	 */
 	HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+
+	/* When this quirk is set, the enabling of diagnostic mode is
+	 * not persistent over HCI Reset. Every time the controller
+	 * is brought up it needs to be reprogrammed.
+	 *
+	 * This quirk can be set before hci_register_dev is called or
+	 * during the hdev->setup vendor callback.
+	 */
+	HCI_QUIRK_NON_PERSISTENT_DIAG,
 };
 
 /* HCI device flags */
@@ -238,6 +250,7 @@ enum {
 	HCI_LE_SCAN_INTERRUPTED,
 
 	HCI_DUT_MODE,
+	HCI_VENDOR_DIAG,
 	HCI_FORCE_BREDR_SMP,
 	HCI_FORCE_STATIC_ADDR,
 
@@ -260,6 +273,7 @@ enum {
 #define HCI_ACLDATA_PKT		0x02
 #define HCI_SCODATA_PKT		0x03
 #define HCI_EVENT_PKT		0x04
+#define HCI_DIAG_PKT		0xf0
 #define HCI_VENDOR_PKT		0xff
 
 /* HCI packet types */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9e1a59e01fa2..1878d0a96333 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -398,6 +398,8 @@ struct hci_dev {
398 int (*send)(struct hci_dev *hdev, struct sk_buff *skb); 398 int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
399 void (*notify)(struct hci_dev *hdev, unsigned int evt); 399 void (*notify)(struct hci_dev *hdev, unsigned int evt);
400 void (*hw_error)(struct hci_dev *hdev, u8 code); 400 void (*hw_error)(struct hci_dev *hdev, u8 code);
401 int (*post_init)(struct hci_dev *hdev);
402 int (*set_diag)(struct hci_dev *hdev, bool enable);
401 int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); 403 int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
402}; 404};
403 405
@@ -469,6 +471,7 @@ struct hci_conn {
469 struct delayed_work auto_accept_work; 471 struct delayed_work auto_accept_work;
470 struct delayed_work idle_work; 472 struct delayed_work idle_work;
471 struct delayed_work le_conn_timeout; 473 struct delayed_work le_conn_timeout;
474 struct work_struct le_scan_cleanup;
472 475
473 struct device dev; 476 struct device dev;
474 struct dentry *debugfs; 477 struct dentry *debugfs;
@@ -791,6 +794,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
791 return NULL; 794 return NULL;
792} 795}
793 796
797static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
798 bdaddr_t *ba,
799 __u8 ba_type)
800{
801 struct hci_conn_hash *h = &hdev->conn_hash;
802 struct hci_conn *c;
803
804 rcu_read_lock();
805
806 list_for_each_entry_rcu(c, &h->list, list) {
807 if (c->type != LE_LINK)
808 continue;
809
810 if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
811 rcu_read_unlock();
812 return c;
813 }
814 }
815
816 rcu_read_unlock();
817
818 return NULL;
819}
820
794static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, 821static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
795 __u8 type, __u16 state) 822 __u8 type, __u16 state)
796{ 823{
@@ -987,6 +1014,7 @@ int hci_resume_dev(struct hci_dev *hdev);
987int hci_reset_dev(struct hci_dev *hdev); 1014int hci_reset_dev(struct hci_dev *hdev);
988int hci_dev_open(__u16 dev); 1015int hci_dev_open(__u16 dev);
989int hci_dev_close(__u16 dev); 1016int hci_dev_close(__u16 dev);
1017int hci_dev_do_close(struct hci_dev *hdev);
990int hci_dev_reset(__u16 dev); 1018int hci_dev_reset(__u16 dev);
991int hci_dev_reset_stat(__u16 dev); 1019int hci_dev_reset_stat(__u16 dev);
992int hci_dev_cmd(unsigned int cmd, void __user *arg); 1020int hci_dev_cmd(unsigned int cmd, void __user *arg);
@@ -1014,9 +1042,6 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
1014struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 1042struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
1015 bdaddr_t *addr, 1043 bdaddr_t *addr,
1016 u8 addr_type); 1044 u8 addr_type);
1017struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
1018 bdaddr_t *addr,
1019 u8 addr_type);
1020 1045
1021void hci_uuids_clear(struct hci_dev *hdev); 1046void hci_uuids_clear(struct hci_dev *hdev);
1022 1047
@@ -1065,6 +1090,7 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
1065void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1090void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
1066 1091
1067int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 1092int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
1093int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
1068 1094
1069void hci_init_sysfs(struct hci_dev *hdev); 1095void hci_init_sysfs(struct hci_dev *hdev);
1070void hci_conn_init_sysfs(struct hci_conn *conn); 1096void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1348,6 +1374,9 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
1348 1374
1349void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); 1375void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
1350 1376
1377struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1378 const void *param, u32 timeout);
1379
1351/* ----- HCI Sockets ----- */ 1380/* ----- HCI Sockets ----- */
1352void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); 1381void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
1353void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, 1382void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
@@ -1452,7 +1481,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1452void mgmt_discovering(struct hci_dev *hdev, u8 discovering); 1481void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
1453bool mgmt_powering_down(struct hci_dev *hdev); 1482bool mgmt_powering_down(struct hci_dev *hdev);
1454void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); 1483void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
1455void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); 1484void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
1456void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, 1485void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
1457 bool persistent); 1486 bool persistent);
1458void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, 1487void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
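The new hci_cmd_sync() declaration above gives drivers a way to send an HCI command and wait for the reply, for instance from the new set_diag() callback. A hypothetical vendor callback sketch follows; the opcode and payload are made up, and the error handling assumes the ERR_PTR convention used by the existing __hci_cmd_sync() helpers.

/*
 * Hypothetical vendor set_diag() callback (not part of the patch): send one
 * vendor-specific command with the new hci_cmd_sync() helper.
 */
#include <linux/err.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int drv_set_diag_sketch(struct hci_dev *hdev, bool enable)
{
	u8 param = enable ? 0x01 : 0x00;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, 0xfc01 /* hypothetical vendor opcode */,
			   sizeof(param), &param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	bt_dev_dbg(hdev, "diag %s", enable ? "enabled" : "disabled");
	kfree_skb(skb);
	return 0;
}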
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 77d1e5764185..2b67567cf28d 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -39,6 +39,10 @@ struct hci_mon_hdr {
 #define HCI_MON_ACL_RX_PKT	5
 #define HCI_MON_SCO_TX_PKT	6
 #define HCI_MON_SCO_RX_PKT	7
+#define HCI_MON_OPEN_INDEX	8
+#define HCI_MON_CLOSE_INDEX	9
+#define HCI_MON_INDEX_INFO	10
+#define HCI_MON_VENDOR_DIAG	11
 
 struct hci_mon_new_index {
 	__u8		type;
@@ -48,4 +52,10 @@ struct hci_mon_new_index {
 } __packed;
 #define HCI_MON_NEW_INDEX_SIZE 16
 
+struct hci_mon_index_info {
+	bdaddr_t	bdaddr;
+	__le16		manufacturer;
+} __packed;
+#define HCI_MON_INDEX_INFO_SIZE 8
+
 #endif /* __HCI_MON_H */
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index c98afc08cc26..52899291f401 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -275,6 +275,8 @@ struct l2cap_conn_rsp {
 #define L2CAP_CR_AUTHORIZATION	0x0006
 #define L2CAP_CR_BAD_KEY_SIZE	0x0007
 #define L2CAP_CR_ENCRYPTION	0x0008
+#define L2CAP_CR_INVALID_SCID	0x0009
+#define L2CAP_CR_SCID_IN_USE	0x0010
 
 /* connect/create channel status */
 #define L2CAP_CS_NO_INFO	0x0000
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c2a40a172fcd..f1fbc3b11962 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -297,8 +297,7 @@ void bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
 void bond_3ad_state_machine_handler(struct work_struct *);
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
-void bond_3ad_adapter_speed_changed(struct slave *slave);
-void bond_3ad_adapter_duplex_changed(struct slave *slave);
+void bond_3ad_adapter_speed_duplex_changed(struct slave *slave);
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int __bond_3ad_get_active_agg_info(struct bonding *bond,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f0889a247643..2c7bdb81d30c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,6 +5,7 @@
5 * 5 *
6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright 2015 Intel Deutschland GmbH
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -858,6 +859,8 @@ struct station_del_parameters {
858/** 859/**
859 * enum cfg80211_station_type - the type of station being modified 860 * enum cfg80211_station_type - the type of station being modified
860 * @CFG80211_STA_AP_CLIENT: client of an AP interface 861 * @CFG80211_STA_AP_CLIENT: client of an AP interface
862 * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still
863 * unassociated (update properties for this type of client is permitted)
861 * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has 864 * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
862 * the AP MLME in the device 865 * the AP MLME in the device
863 * @CFG80211_STA_AP_STA: AP station on managed interface 866 * @CFG80211_STA_AP_STA: AP station on managed interface
@@ -873,6 +876,7 @@ struct station_del_parameters {
873 */ 876 */
874enum cfg80211_station_type { 877enum cfg80211_station_type {
875 CFG80211_STA_AP_CLIENT, 878 CFG80211_STA_AP_CLIENT,
879 CFG80211_STA_AP_CLIENT_UNASSOC,
876 CFG80211_STA_AP_MLME_CLIENT, 880 CFG80211_STA_AP_MLME_CLIENT,
877 CFG80211_STA_AP_STA, 881 CFG80211_STA_AP_STA,
878 CFG80211_STA_IBSS, 882 CFG80211_STA_IBSS,
@@ -1498,13 +1502,26 @@ struct cfg80211_match_set {
1498}; 1502};
1499 1503
1500/** 1504/**
1505 * struct cfg80211_sched_scan_plan - scan plan for scheduled scan
1506 *
1507 * @interval: interval between scheduled scan iterations. In seconds.
1508 * @iterations: number of scan iterations in this scan plan. Zero means
1509 * infinite loop.
1510 * The last scan plan will always have this parameter set to zero,
1511 * all other scan plans will have a finite number of iterations.
1512 */
1513struct cfg80211_sched_scan_plan {
1514 u32 interval;
1515 u32 iterations;
1516};
1517
1518/**
1501 * struct cfg80211_sched_scan_request - scheduled scan request description 1519 * struct cfg80211_sched_scan_request - scheduled scan request description
1502 * 1520 *
1503 * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) 1521 * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
1504 * @n_ssids: number of SSIDs 1522 * @n_ssids: number of SSIDs
1505 * @n_channels: total number of channels to scan 1523 * @n_channels: total number of channels to scan
1506 * @scan_width: channel width for scanning 1524 * @scan_width: channel width for scanning
1507 * @interval: interval between each scheduled scan cycle
1508 * @ie: optional information element(s) to add into Probe Request or %NULL 1525 * @ie: optional information element(s) to add into Probe Request or %NULL
1509 * @ie_len: length of ie in octets 1526 * @ie_len: length of ie in octets
1510 * @flags: bit field of flags controlling operation 1527 * @flags: bit field of flags controlling operation
@@ -1523,6 +1540,9 @@ struct cfg80211_match_set {
1523 * @mac_addr_mask: MAC address mask used with randomisation, bits that 1540 * @mac_addr_mask: MAC address mask used with randomisation, bits that
1524 * are 0 in the mask should be randomised, bits that are 1 should 1541 * are 0 in the mask should be randomised, bits that are 1 should
1525 * be taken from the @mac_addr 1542 * be taken from the @mac_addr
1543 * @scan_plans: scan plans to be executed in this scheduled scan. Lowest
1544 * index must be executed first.
1545 * @n_scan_plans: number of scan plans, at least 1.
1526 * @rcu_head: RCU callback used to free the struct 1546 * @rcu_head: RCU callback used to free the struct
1527 * @owner_nlportid: netlink portid of owner (if this should is a request 1547 * @owner_nlportid: netlink portid of owner (if this should is a request
1528 * owned by a particular socket) 1548 * owned by a particular socket)
@@ -1536,7 +1556,6 @@ struct cfg80211_sched_scan_request {
1536 int n_ssids; 1556 int n_ssids;
1537 u32 n_channels; 1557 u32 n_channels;
1538 enum nl80211_bss_scan_width scan_width; 1558 enum nl80211_bss_scan_width scan_width;
1539 u32 interval;
1540 const u8 *ie; 1559 const u8 *ie;
1541 size_t ie_len; 1560 size_t ie_len;
1542 u32 flags; 1561 u32 flags;
@@ -1544,6 +1563,8 @@ struct cfg80211_sched_scan_request {
1544 int n_match_sets; 1563 int n_match_sets;
1545 s32 min_rssi_thold; 1564 s32 min_rssi_thold;
1546 u32 delay; 1565 u32 delay;
1566 struct cfg80211_sched_scan_plan *scan_plans;
1567 int n_scan_plans;
1547 1568
1548 u8 mac_addr[ETH_ALEN] __aligned(2); 1569 u8 mac_addr[ETH_ALEN] __aligned(2);
1549 u8 mac_addr_mask[ETH_ALEN] __aligned(2); 1570 u8 mac_addr_mask[ETH_ALEN] __aligned(2);
@@ -1573,6 +1594,26 @@ enum cfg80211_signal_type {
1573}; 1594};
1574 1595
1575/** 1596/**
1597 * struct cfg80211_inform_bss - BSS inform data
1598 * @chan: channel the frame was received on
1599 * @scan_width: scan width that was used
1600 * @signal: signal strength value, according to the wiphy's
1601 * signal type
1602 * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
1603 * received; should match the time when the frame was actually
1604 * received by the device (not just by the host, in case it was
1605 * buffered on the device) and be accurate to about 10ms.
1606 * If the frame isn't buffered, just passing the return value of
1607 * ktime_get_boot_ns() is likely appropriate.
1608 */
1609struct cfg80211_inform_bss {
1610 struct ieee80211_channel *chan;
1611 enum nl80211_bss_scan_width scan_width;
1612 s32 signal;
1613 u64 boottime_ns;
1614};
1615
1616/**
1576 * struct cfg80211_bss_ie_data - BSS entry IE data 1617 * struct cfg80211_bss_ie_data - BSS entry IE data
1577 * @tsf: TSF contained in the frame that carried these IEs 1618 * @tsf: TSF contained in the frame that carried these IEs
1578 * @rcu_head: internal use, for freeing 1619 * @rcu_head: internal use, for freeing
@@ -2358,6 +2399,10 @@ struct cfg80211_qos_map {
2358 * @set_power_mgmt: Configure WLAN power management. A timeout value of -1 2399 * @set_power_mgmt: Configure WLAN power management. A timeout value of -1
2359 * allows the driver to adjust the dynamic ps timeout value. 2400 * allows the driver to adjust the dynamic ps timeout value.
2360 * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. 2401 * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
2402 * After configuration, the driver should (soon) send an event indicating
2403 * the current level is above/below the configured threshold; this may
2404 * need some care when the configuration is changed (without first being
2405 * disabled.)
2361 * @set_cqm_txe_config: Configure connection quality monitor TX error 2406 * @set_cqm_txe_config: Configure connection quality monitor TX error
2362 * thresholds. 2407 * thresholds.
2363 * @sched_scan_start: Tell the driver to start a scheduled scan. 2408 * @sched_scan_start: Tell the driver to start a scheduled scan.
@@ -2971,12 +3016,21 @@ enum wiphy_vendor_command_flags {
2971 * @doit: callback for the operation, note that wdev is %NULL if the 3016 * @doit: callback for the operation, note that wdev is %NULL if the
2972 * flags didn't ask for a wdev and non-%NULL otherwise; the data 3017 * flags didn't ask for a wdev and non-%NULL otherwise; the data
2973 * pointer may be %NULL if userspace provided no data at all 3018 * pointer may be %NULL if userspace provided no data at all
3019 * @dumpit: dump callback, for transferring bigger/multiple items. The
3020 * @storage points to cb->args[5], ie. is preserved over the multiple
3021 * dumpit calls.
3022 * It's recommended to not have the same sub command with both @doit and
3023 * @dumpit, so that userspace can assume certain ones are get and others
3024 * are used with dump requests.
2974 */ 3025 */
2975struct wiphy_vendor_command { 3026struct wiphy_vendor_command {
2976 struct nl80211_vendor_cmd_info info; 3027 struct nl80211_vendor_cmd_info info;
2977 u32 flags; 3028 u32 flags;
2978 int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev, 3029 int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
2979 const void *data, int data_len); 3030 const void *data, int data_len);
3031 int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev,
3032 struct sk_buff *skb, const void *data, int data_len,
3033 unsigned long *storage);
2980}; 3034};
2981 3035
2982/** 3036/**
@@ -3044,6 +3098,12 @@ struct wiphy_vendor_command {
3044 * include fixed IEs like supported rates 3098 * include fixed IEs like supported rates
3045 * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled 3099 * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
3046 * scans 3100 * scans
3101 * @max_sched_scan_plans: maximum number of scan plans (scan interval and number
3102 * of iterations) for scheduled scan supported by the device.
3103 * @max_sched_scan_plan_interval: maximum interval (in seconds) for a
3104 * single scan plan supported by the device.
3105 * @max_sched_scan_plan_iterations: maximum number of iterations for a single
3106 * scan plan supported by the device.
3047 * @coverage_class: current coverage class 3107 * @coverage_class: current coverage class
3048 * @fw_version: firmware version for ethtool reporting 3108 * @fw_version: firmware version for ethtool reporting
3049 * @hw_version: hardware version for ethtool reporting 3109 * @hw_version: hardware version for ethtool reporting
@@ -3151,6 +3211,9 @@ struct wiphy {
3151 u8 max_match_sets; 3211 u8 max_match_sets;
3152 u16 max_scan_ie_len; 3212 u16 max_scan_ie_len;
3153 u16 max_sched_scan_ie_len; 3213 u16 max_sched_scan_ie_len;
3214 u32 max_sched_scan_plans;
3215 u32 max_sched_scan_plan_interval;
3216 u32 max_sched_scan_plan_iterations;
3154 3217
3155 int n_cipher_suites; 3218 int n_cipher_suites;
3156 const u32 *cipher_suites; 3219 const u32 *cipher_suites;
@@ -3946,14 +4009,11 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
3946void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); 4009void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
3947 4010
3948/** 4011/**
3949 * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame 4012 * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
3950 *
3951 * @wiphy: the wiphy reporting the BSS 4013 * @wiphy: the wiphy reporting the BSS
3952 * @rx_channel: The channel the frame was received on 4014 * @data: the BSS metadata
3953 * @scan_width: width of the control channel
3954 * @mgmt: the management frame (probe response or beacon) 4015 * @mgmt: the management frame (probe response or beacon)
3955 * @len: length of the management frame 4016 * @len: length of the management frame
3956 * @signal: the signal strength, type depends on the wiphy's signal_type
3957 * @gfp: context flags 4017 * @gfp: context flags
3958 * 4018 *
3959 * This informs cfg80211 that BSS information was found and 4019 * This informs cfg80211 that BSS information was found and
@@ -3963,11 +4023,26 @@ void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
3963 * Or %NULL on error. 4023 * Or %NULL on error.
3964 */ 4024 */
3965struct cfg80211_bss * __must_check 4025struct cfg80211_bss * __must_check
4026cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
4027 struct cfg80211_inform_bss *data,
4028 struct ieee80211_mgmt *mgmt, size_t len,
4029 gfp_t gfp);
4030
4031static inline struct cfg80211_bss * __must_check
3966cfg80211_inform_bss_width_frame(struct wiphy *wiphy, 4032cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
3967 struct ieee80211_channel *rx_channel, 4033 struct ieee80211_channel *rx_channel,
3968 enum nl80211_bss_scan_width scan_width, 4034 enum nl80211_bss_scan_width scan_width,
3969 struct ieee80211_mgmt *mgmt, size_t len, 4035 struct ieee80211_mgmt *mgmt, size_t len,
3970 s32 signal, gfp_t gfp); 4036 s32 signal, gfp_t gfp)
4037{
4038 struct cfg80211_inform_bss data = {
4039 .chan = rx_channel,
4040 .scan_width = scan_width,
4041 .signal = signal,
4042 };
4043
4044 return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
4045}
3971 4046
3972static inline struct cfg80211_bss * __must_check 4047static inline struct cfg80211_bss * __must_check
3973cfg80211_inform_bss_frame(struct wiphy *wiphy, 4048cfg80211_inform_bss_frame(struct wiphy *wiphy,
@@ -3975,9 +4050,13 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
3975 struct ieee80211_mgmt *mgmt, size_t len, 4050 struct ieee80211_mgmt *mgmt, size_t len,
3976 s32 signal, gfp_t gfp) 4051 s32 signal, gfp_t gfp)
3977{ 4052{
3978 return cfg80211_inform_bss_width_frame(wiphy, rx_channel, 4053 struct cfg80211_inform_bss data = {
3979 NL80211_BSS_CHAN_WIDTH_20, 4054 .chan = rx_channel,
3980 mgmt, len, signal, gfp); 4055 .scan_width = NL80211_BSS_CHAN_WIDTH_20,
4056 .signal = signal,
4057 };
4058
4059 return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
3981} 4060}
3982 4061
3983/** 4062/**
@@ -3994,11 +4073,10 @@ enum cfg80211_bss_frame_type {
3994}; 4073};
3995 4074
3996/** 4075/**
3997 * cfg80211_inform_bss_width - inform cfg80211 of a new BSS 4076 * cfg80211_inform_bss_data - inform cfg80211 of a new BSS
3998 * 4077 *
3999 * @wiphy: the wiphy reporting the BSS 4078 * @wiphy: the wiphy reporting the BSS
4000 * @rx_channel: The channel the frame was received on 4079 * @data: the BSS metadata
4001 * @scan_width: width of the control channel
4002 * @ftype: frame type (if known) 4080 * @ftype: frame type (if known)
4003 * @bssid: the BSSID of the BSS 4081 * @bssid: the BSSID of the BSS
4004 * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) 4082 * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
@@ -4006,7 +4084,6 @@ enum cfg80211_bss_frame_type {
4006 * @beacon_interval: the beacon interval announced by the peer 4084 * @beacon_interval: the beacon interval announced by the peer
4007 * @ie: additional IEs sent by the peer 4085 * @ie: additional IEs sent by the peer
4008 * @ielen: length of the additional IEs 4086 * @ielen: length of the additional IEs
4009 * @signal: the signal strength, type depends on the wiphy's signal_type
4010 * @gfp: context flags 4087 * @gfp: context flags
4011 * 4088 *
4012 * This informs cfg80211 that BSS information was found and 4089 * This informs cfg80211 that BSS information was found and
@@ -4016,13 +4093,32 @@ enum cfg80211_bss_frame_type {
4016 * Or %NULL on error. 4093 * Or %NULL on error.
4017 */ 4094 */
4018struct cfg80211_bss * __must_check 4095struct cfg80211_bss * __must_check
4096cfg80211_inform_bss_data(struct wiphy *wiphy,
4097 struct cfg80211_inform_bss *data,
4098 enum cfg80211_bss_frame_type ftype,
4099 const u8 *bssid, u64 tsf, u16 capability,
4100 u16 beacon_interval, const u8 *ie, size_t ielen,
4101 gfp_t gfp);
4102
4103static inline struct cfg80211_bss * __must_check
4019cfg80211_inform_bss_width(struct wiphy *wiphy, 4104cfg80211_inform_bss_width(struct wiphy *wiphy,
4020 struct ieee80211_channel *rx_channel, 4105 struct ieee80211_channel *rx_channel,
4021 enum nl80211_bss_scan_width scan_width, 4106 enum nl80211_bss_scan_width scan_width,
4022 enum cfg80211_bss_frame_type ftype, 4107 enum cfg80211_bss_frame_type ftype,
4023 const u8 *bssid, u64 tsf, u16 capability, 4108 const u8 *bssid, u64 tsf, u16 capability,
4024 u16 beacon_interval, const u8 *ie, size_t ielen, 4109 u16 beacon_interval, const u8 *ie, size_t ielen,
4025 s32 signal, gfp_t gfp); 4110 s32 signal, gfp_t gfp)
4111{
4112 struct cfg80211_inform_bss data = {
4113 .chan = rx_channel,
4114 .scan_width = scan_width,
4115 .signal = signal,
4116 };
4117
4118 return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
4119 capability, beacon_interval, ie, ielen,
4120 gfp);
4121}
4026 4122
4027static inline struct cfg80211_bss * __must_check 4123static inline struct cfg80211_bss * __must_check
4028cfg80211_inform_bss(struct wiphy *wiphy, 4124cfg80211_inform_bss(struct wiphy *wiphy,
@@ -4032,11 +4128,15 @@ cfg80211_inform_bss(struct wiphy *wiphy,
4032 u16 beacon_interval, const u8 *ie, size_t ielen, 4128 u16 beacon_interval, const u8 *ie, size_t ielen,
4033 s32 signal, gfp_t gfp) 4129 s32 signal, gfp_t gfp)
4034{ 4130{
4035 return cfg80211_inform_bss_width(wiphy, rx_channel, 4131 struct cfg80211_inform_bss data = {
4036 NL80211_BSS_CHAN_WIDTH_20, ftype, 4132 .chan = rx_channel,
4037 bssid, tsf, capability, 4133 .scan_width = NL80211_BSS_CHAN_WIDTH_20,
4038 beacon_interval, ie, ielen, signal, 4134 .signal = signal,
4039 gfp); 4135 };
4136
4137 return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
4138 capability, beacon_interval, ie, ielen,
4139 gfp);
4040} 4140}
4041 4141
4042struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, 4142struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
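With the cfg80211 changes above, the channel, scan-width, and signal arguments move into struct cfg80211_inform_bss so a CLOCK_BOOTTIME RX timestamp can be reported alongside them, and the old cfg80211_inform_bss_width_frame() becomes a static inline wrapper. Below is a hypothetical driver-side sketch of reporting a received beacon through the new entry point; the surrounding driver context is made up, and the allocation flags depend on the calling context (use GFP_ATOMIC from atomic paths).

/*
 * Illustrative driver RX reporting path (not part of the patch): fill the new
 * struct cfg80211_inform_bss and hand a received beacon/probe response to
 * cfg80211_inform_bss_frame_data().
 */
#include <linux/timekeeping.h>
#include <net/cfg80211.h>

static void drv_report_bss_sketch(struct wiphy *wiphy,
				  struct ieee80211_channel *chan,
				  struct ieee80211_mgmt *mgmt, size_t len,
				  s32 signal)
{
	struct cfg80211_inform_bss data = {
		.chan = chan,
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = signal,
		/* frame was not buffered on the device, host RX time is fine */
		.boottime_ns = ktime_get_boot_ns(),
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					     GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);
}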
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 76b1ffaea863..171cd76558fb 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -27,6 +27,16 @@
27struct wpan_phy; 27struct wpan_phy;
28struct wpan_phy_cca; 28struct wpan_phy_cca;
29 29
30#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
31struct ieee802154_llsec_device_key;
32struct ieee802154_llsec_seclevel;
33struct ieee802154_llsec_params;
34struct ieee802154_llsec_device;
35struct ieee802154_llsec_table;
36struct ieee802154_llsec_key_id;
37struct ieee802154_llsec_key;
38#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
39
30struct cfg802154_ops { 40struct cfg802154_ops {
31 struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, 41 struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
32 const char *name, 42 const char *name,
@@ -65,6 +75,51 @@ struct cfg802154_ops {
65 struct wpan_dev *wpan_dev, bool mode); 75 struct wpan_dev *wpan_dev, bool mode);
66 int (*set_ackreq_default)(struct wpan_phy *wpan_phy, 76 int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
67 struct wpan_dev *wpan_dev, bool ackreq); 77 struct wpan_dev *wpan_dev, bool ackreq);
78#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
79 void (*get_llsec_table)(struct wpan_phy *wpan_phy,
80 struct wpan_dev *wpan_dev,
81 struct ieee802154_llsec_table **table);
82 void (*lock_llsec_table)(struct wpan_phy *wpan_phy,
83 struct wpan_dev *wpan_dev);
84 void (*unlock_llsec_table)(struct wpan_phy *wpan_phy,
85 struct wpan_dev *wpan_dev);
86 /* TODO remove locking/get table callbacks, this is part of the
87 * nl802154 interface and should be accessible from ieee802154 layer.
88 */
89 int (*get_llsec_params)(struct wpan_phy *wpan_phy,
90 struct wpan_dev *wpan_dev,
91 struct ieee802154_llsec_params *params);
92 int (*set_llsec_params)(struct wpan_phy *wpan_phy,
93 struct wpan_dev *wpan_dev,
94 const struct ieee802154_llsec_params *params,
95 int changed);
96 int (*add_llsec_key)(struct wpan_phy *wpan_phy,
97 struct wpan_dev *wpan_dev,
98 const struct ieee802154_llsec_key_id *id,
99 const struct ieee802154_llsec_key *key);
100 int (*del_llsec_key)(struct wpan_phy *wpan_phy,
101 struct wpan_dev *wpan_dev,
102 const struct ieee802154_llsec_key_id *id);
103 int (*add_seclevel)(struct wpan_phy *wpan_phy,
104 struct wpan_dev *wpan_dev,
105 const struct ieee802154_llsec_seclevel *sl);
106 int (*del_seclevel)(struct wpan_phy *wpan_phy,
107 struct wpan_dev *wpan_dev,
108 const struct ieee802154_llsec_seclevel *sl);
109 int (*add_device)(struct wpan_phy *wpan_phy,
110 struct wpan_dev *wpan_dev,
111 const struct ieee802154_llsec_device *dev);
112 int (*del_device)(struct wpan_phy *wpan_phy,
113 struct wpan_dev *wpan_dev, __le64 extended_addr);
114 int (*add_devkey)(struct wpan_phy *wpan_phy,
115 struct wpan_dev *wpan_dev,
116 __le64 extended_addr,
117 const struct ieee802154_llsec_device_key *key);
118 int (*del_devkey)(struct wpan_phy *wpan_phy,
119 struct wpan_dev *wpan_dev,
120 __le64 extended_addr,
121 const struct ieee802154_llsec_device_key *key);
122#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
68}; 123};
69 124
70static inline bool 125static inline bool
@@ -167,6 +222,102 @@ struct wpan_phy {
167 char priv[0] __aligned(NETDEV_ALIGN); 222 char priv[0] __aligned(NETDEV_ALIGN);
168}; 223};
169 224
225struct ieee802154_addr {
226 u8 mode;
227 __le16 pan_id;
228 union {
229 __le16 short_addr;
230 __le64 extended_addr;
231 };
232};
233
234struct ieee802154_llsec_key_id {
235 u8 mode;
236 u8 id;
237 union {
238 struct ieee802154_addr device_addr;
239 __le32 short_source;
240 __le64 extended_source;
241 };
242};
243
244#define IEEE802154_LLSEC_KEY_SIZE 16
245
246struct ieee802154_llsec_key {
247 u8 frame_types;
248 u32 cmd_frame_ids;
249 /* TODO replace with NL802154_KEY_SIZE */
250 u8 key[IEEE802154_LLSEC_KEY_SIZE];
251};
252
253struct ieee802154_llsec_key_entry {
254 struct list_head list;
255
256 struct ieee802154_llsec_key_id id;
257 struct ieee802154_llsec_key *key;
258};
259
260struct ieee802154_llsec_params {
261 bool enabled;
262
263 __be32 frame_counter;
264 u8 out_level;
265 struct ieee802154_llsec_key_id out_key;
266
267 __le64 default_key_source;
268
269 __le16 pan_id;
270 __le64 hwaddr;
271 __le64 coord_hwaddr;
272 __le16 coord_shortaddr;
273};
274
275struct ieee802154_llsec_table {
276 struct list_head keys;
277 struct list_head devices;
278 struct list_head security_levels;
279};
280
281struct ieee802154_llsec_seclevel {
282 struct list_head list;
283
284 u8 frame_type;
285 u8 cmd_frame_id;
286 bool device_override;
287 u32 sec_levels;
288};
289
290struct ieee802154_llsec_device {
291 struct list_head list;
292
293 __le16 pan_id;
294 __le16 short_addr;
295 __le64 hwaddr;
296 u32 frame_counter;
297 bool seclevel_exempt;
298
299 u8 key_mode;
300 struct list_head keys;
301};
302
303struct ieee802154_llsec_device_key {
304 struct list_head list;
305
306 struct ieee802154_llsec_key_id key_id;
307 u32 frame_counter;
308};
309
310struct wpan_dev_header_ops {
311 /* TODO create callback currently assumes ieee802154_mac_cb inside
312 * skb->cb. This should be changed to give these information as
313 * parameter.
314 */
315 int (*create)(struct sk_buff *skb, struct net_device *dev,
316 const struct ieee802154_addr *daddr,
317 const struct ieee802154_addr *saddr,
318 unsigned int len);
319};
320
170struct wpan_dev { 321struct wpan_dev {
171 struct wpan_phy *wpan_phy; 322 struct wpan_phy *wpan_phy;
172 int iftype; 323 int iftype;
@@ -175,6 +326,8 @@ struct wpan_dev {
175 struct list_head list; 326 struct list_head list;
176 struct net_device *netdev; 327 struct net_device *netdev;
177 328
329 const struct wpan_dev_header_ops *header_ops;
330
178 /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */ 331 /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
179 struct net_device *lowpan_dev; 332 struct net_device *lowpan_dev;
180 333
@@ -205,6 +358,17 @@ struct wpan_dev {
205 358
206#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) 359#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
207 360
361static inline int
362wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
363 const struct ieee802154_addr *daddr,
364 const struct ieee802154_addr *saddr,
365 unsigned int len)
366{
367 struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
368
369 return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
370}
371
208struct wpan_phy * 372struct wpan_phy *
209wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); 373wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
210static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) 374static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
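
The cfg802154.h hunks above move struct ieee802154_addr and the link-layer security (llsec) tables into the cfg802154 layer, add the NL802154_EXPERIMENTAL llsec ops, and introduce wpan_dev_header_ops with a single create() hook that the new wpan_dev_hard_header() inline forwards to. A minimal caller sketch follows; it is not from the patch, and the function name and addressing-mode values are placeholders.

/* Hypothetical upper-layer helper: build an 802.15.4 MAC header on an
 * outgoing skb via wpan_dev_hard_header(), which simply calls the
 * device's header_ops->create() as defined above.
 */
static int example_build_hdr(struct sk_buff *skb, struct net_device *dev,
			     __le16 pan_id, __le16 dst_short, __le64 src_ext)
{
	struct ieee802154_addr daddr = {
		.mode	    = 2,		/* placeholder: short addressing */
		.pan_id	    = pan_id,
		.short_addr = dst_short,
	};
	struct ieee802154_addr saddr = {
		.mode	       = 3,		/* placeholder: extended addressing */
		.pan_id	       = pan_id,
		.extended_addr = src_ext,
	};

	return wpan_dev_hard_header(skb, dev, &daddr, &saddr, skb->len);
}
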
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index d0424269313f..5e902fc3f4eb 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -18,11 +18,11 @@ struct dn_neigh {
18 18
19void dn_neigh_init(void); 19void dn_neigh_init(void);
20void dn_neigh_cleanup(void); 20void dn_neigh_cleanup(void);
21int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb); 21int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
22int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb); 22int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
23void dn_neigh_pointopoint_hello(struct sk_buff *skb); 23void dn_neigh_pointopoint_hello(struct sk_buff *skb);
24int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); 24int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
25int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb); 25int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
26 26
27extern struct neigh_table dn_neigh_table; 27extern struct neigh_table dn_neigh_table;
28 28
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b34d812bc5d0..82a4c6011173 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -197,6 +197,11 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
197 return ds->pd->rtable[dst->cpu_switch]; 197 return ds->pd->rtable[dst->cpu_switch];
198} 198}
199 199
200struct switchdev_trans;
201struct switchdev_obj;
202struct switchdev_obj_port_fdb;
203struct switchdev_obj_port_vlan;
204
200struct dsa_switch_driver { 205struct dsa_switch_driver {
201 struct list_head list; 206 struct list_head list;
202 207
@@ -305,24 +310,32 @@ struct dsa_switch_driver {
305 /* 310 /*
306 * VLAN support 311 * VLAN support
307 */ 312 */
313 int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
314 const struct switchdev_obj_port_vlan *vlan,
315 struct switchdev_trans *trans);
316 int (*port_vlan_add)(struct dsa_switch *ds, int port,
317 const struct switchdev_obj_port_vlan *vlan,
318 struct switchdev_trans *trans);
319 int (*port_vlan_del)(struct dsa_switch *ds, int port,
320 const struct switchdev_obj_port_vlan *vlan);
308 int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid); 321 int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
309 int (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
310 int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
311 bool untagged);
312 int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
313 int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid, 322 int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
314 unsigned long *ports, unsigned long *untagged); 323 unsigned long *ports, unsigned long *untagged);
315 324
316 /* 325 /*
317 * Forwarding database 326 * Forwarding database
318 */ 327 */
328 int (*port_fdb_prepare)(struct dsa_switch *ds, int port,
329 const struct switchdev_obj_port_fdb *fdb,
330 struct switchdev_trans *trans);
319 int (*port_fdb_add)(struct dsa_switch *ds, int port, 331 int (*port_fdb_add)(struct dsa_switch *ds, int port,
320 const unsigned char *addr, u16 vid); 332 const struct switchdev_obj_port_fdb *fdb,
333 struct switchdev_trans *trans);
321 int (*port_fdb_del)(struct dsa_switch *ds, int port, 334 int (*port_fdb_del)(struct dsa_switch *ds, int port,
322 const unsigned char *addr, u16 vid); 335 const struct switchdev_obj_port_fdb *fdb);
323 int (*port_fdb_getnext)(struct dsa_switch *ds, int port, 336 int (*port_fdb_dump)(struct dsa_switch *ds, int port,
324 unsigned char *addr, u16 *vid, 337 struct switchdev_obj_port_fdb *fdb,
325 bool *is_static); 338 int (*cb)(struct switchdev_obj *obj));
326}; 339};
327 340
328void register_switch_driver(struct dsa_switch_driver *type); 341void register_switch_driver(struct dsa_switch_driver *type);
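
The dsa.h hunk replaces the vid/u16-based VLAN and FDB callbacks with switchdev-object variants and splits additions into a prepare step that only validates and a commit step that programs the hardware, matching the switchdev transaction model. A rough sketch of how a driver might wire up the new VLAN hooks; the exm_* names are invented for illustration and the hardware helpers are assumed, not part of the patch.

/* Placeholder hardware helpers, not part of the patch. */
static bool exm_hw_vlan_supported(struct dsa_switch *ds,
				  const struct switchdev_obj_port_vlan *vlan);
static int exm_hw_vlan_write(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan);

/* prepare phase: only check that the request can be honoured, touch no state */
static int exm_port_vlan_prepare(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_vlan *vlan,
				 struct switchdev_trans *trans)
{
	return exm_hw_vlan_supported(ds, vlan) ? 0 : -EOPNOTSUPP;
}

/* commit phase: actually program the VLAN table entry */
static int exm_port_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct switchdev_trans *trans)
{
	return exm_hw_vlan_write(ds, port, vlan);
}

static struct dsa_switch_driver exm_switch_driver = {
	.port_vlan_prepare	= exm_port_vlan_prepare,
	.port_vlan_add		= exm_port_vlan_add,
	/* .port_vlan_del and the new .port_fdb_* hooks follow the same shape */
};
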
diff --git a/include/net/dst.h b/include/net/dst.h
index 9261d928303d..1279f9b09791 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -45,7 +45,7 @@ struct dst_entry {
45 void *__pad1; 45 void *__pad1;
46#endif 46#endif
47 int (*input)(struct sk_buff *); 47 int (*input)(struct sk_buff *);
48 int (*output)(struct sock *sk, struct sk_buff *skb); 48 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
49 49
50 unsigned short flags; 50 unsigned short flags;
51#define DST_HOST 0x0001 51#define DST_HOST 0x0001
@@ -365,10 +365,10 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
365 __skb_tunnel_rx(skb, dev, net); 365 __skb_tunnel_rx(skb, dev, net);
366} 366}
367 367
368int dst_discard_sk(struct sock *sk, struct sk_buff *skb); 368int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
369static inline int dst_discard(struct sk_buff *skb) 369static inline int dst_discard(struct sk_buff *skb)
370{ 370{
371 return dst_discard_sk(skb->sk, skb); 371 return dst_discard_out(&init_net, skb->sk, skb);
372} 372}
373void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, 373void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
374 int initial_obsolete, unsigned short flags); 374 int initial_obsolete, unsigned short flags);
@@ -454,13 +454,9 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
454} 454}
455 455
456/* Output packet to network from transport. */ 456/* Output packet to network from transport. */
457static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb) 457static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
458{ 458{
459 return skb_dst(skb)->output(sk, skb); 459 return skb_dst(skb)->output(net, sk, skb);
460}
461static inline int dst_output(struct sk_buff *skb)
462{
463 return dst_output_sk(skb->sk, skb);
464} 460}
465 461
466/* Input packet from network to transport. */ 462/* Input packet from network to transport. */
@@ -489,7 +485,8 @@ struct flowi;
489#ifndef CONFIG_XFRM 485#ifndef CONFIG_XFRM
490static inline struct dst_entry *xfrm_lookup(struct net *net, 486static inline struct dst_entry *xfrm_lookup(struct net *net,
491 struct dst_entry *dst_orig, 487 struct dst_entry *dst_orig,
492 const struct flowi *fl, struct sock *sk, 488 const struct flowi *fl,
489 const struct sock *sk,
493 int flags) 490 int flags)
494{ 491{
495 return dst_orig; 492 return dst_orig;
@@ -498,7 +495,7 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
498static inline struct dst_entry *xfrm_lookup_route(struct net *net, 495static inline struct dst_entry *xfrm_lookup_route(struct net *net,
499 struct dst_entry *dst_orig, 496 struct dst_entry *dst_orig,
500 const struct flowi *fl, 497 const struct flowi *fl,
501 struct sock *sk, 498 const struct sock *sk,
502 int flags) 499 int flags)
503{ 500{
504 return dst_orig; 501 return dst_orig;
@@ -511,11 +508,11 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
511 508
512#else 509#else
513struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 510struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
514 const struct flowi *fl, struct sock *sk, 511 const struct flowi *fl, const struct sock *sk,
515 int flags); 512 int flags);
516 513
517struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, 514struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
518 const struct flowi *fl, struct sock *sk, 515 const struct flowi *fl, const struct sock *sk,
519 int flags); 516 int flags);
520 517
521/* skb attached with this dst needs transformation if dst->xfrm is valid */ 518/* skb attached with this dst needs transformation if dst->xfrm is valid */
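
In dst.h the output hook and its wrappers gain an explicit struct net argument, and the old dst_output_sk()/dst_output() pair collapses into a single dst_output(net, sk, skb). A caller that used to pass only the skb would be converted roughly as below; where the net pointer comes from depends on the call site, so here it is simply taken as a parameter.

/* Sketch of a converted caller: the namespace is passed explicitly
 * instead of being re-derived inside the output path.
 */
static int example_send(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* skb must already carry a dst; ->output() is reached through it */
	return dst_output(net, sk, skb);	/* was: dst_output(skb) */
}
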
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index af9d5382f6cb..6816f0fa5693 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -60,6 +60,39 @@ static inline struct metadata_dst *tun_rx_dst(int md_size)
60 return tun_dst; 60 return tun_dst;
61} 61}
62 62
63static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
64{
65 struct metadata_dst *md_dst = skb_metadata_dst(skb);
66 int md_size;
67 struct metadata_dst *new_md;
68
69 if (!md_dst)
70 return ERR_PTR(-EINVAL);
71
72 md_size = md_dst->u.tun_info.options_len;
73 new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
74 if (!new_md)
75 return ERR_PTR(-ENOMEM);
76
77 memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
78 sizeof(struct ip_tunnel_info) + md_size);
79 skb_dst_drop(skb);
80 dst_hold(&new_md->dst);
81 skb_dst_set(skb, &new_md->dst);
82 return new_md;
83}
84
85static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
86{
87 struct metadata_dst *dst;
88
89 dst = tun_dst_unclone(skb);
90 if (IS_ERR(dst))
91 return NULL;
92
93 return &dst->u.tun_info;
94}
95
63static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, 96static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
64 __be16 flags, 97 __be16 flags,
65 __be64 tunnel_id, 98 __be64 tunnel_id,
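
The new tun_dst_unclone() copies a shared metadata dst, options included, into a private one and attaches it to the skb, and skb_tunnel_info_unclone() wraps that to hand back a writable ip_tunnel_info (or NULL on failure). A hedged usage sketch; the tun_id field access assumes the usual ip_tunnel_key layout.

/* Hypothetical helper that rewrites tunnel metadata in place: unclone
 * first so other holders of the shared metadata dst are not affected.
 */
static int example_set_tun_id(struct sk_buff *skb, __be64 new_id)
{
	struct ip_tunnel_info *info = skb_tunnel_info_unclone(skb);

	if (!info)
		return -ENOMEM;		/* no metadata dst, or allocation failed */

	info->key.tun_id = new_id;	/* assumption: key.tun_id field */
	return 0;
}
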
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index d64253914a6a..a0d443ca16fc 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -9,6 +9,7 @@ struct kmem_cachep;
9struct net_device; 9struct net_device;
10struct sk_buff; 10struct sk_buff;
11struct sock; 11struct sock;
12struct net;
12 13
13struct dst_ops { 14struct dst_ops {
14 unsigned short family; 15 unsigned short family;
@@ -28,7 +29,7 @@ struct dst_ops {
28 struct sk_buff *skb, u32 mtu); 29 struct sk_buff *skb, u32 mtu);
29 void (*redirect)(struct dst_entry *dst, struct sock *sk, 30 void (*redirect)(struct dst_entry *dst, struct sock *sk,
30 struct sk_buff *skb); 31 struct sk_buff *skb);
31 int (*local_out)(struct sk_buff *skb); 32 int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
32 struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, 33 struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
33 struct sk_buff *skb, 34 struct sk_buff *skb,
34 const void *daddr); 35 const void *daddr);
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 2a2d6bb34eb8..bb7f467da7fc 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -17,6 +17,7 @@ struct ethoc_platform_data {
17 u8 hwaddr[IFHWADDRLEN]; 17 u8 hwaddr[IFHWADDRLEN];
18 s8 phy_id; 18 s8 phy_id;
19 u32 eth_clkfreq; 19 u32 eth_clkfreq;
20 bool big_endian;
20}; 21};
21 22
22#endif /* !LINUX_NET_ETHOC_H */ 23#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/flow.h b/include/net/flow.h
index 9b85db85f13c..83969eebebf3 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,7 +34,7 @@ struct flowi_common {
34 __u8 flowic_flags; 34 __u8 flowic_flags;
35#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
36#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_VRFSRC 0x04 37#define FLOWI_FLAG_L3MDEV_SRC 0x04
38#define FLOWI_FLAG_SKIP_NH_OIF 0x08 38#define FLOWI_FLAG_SKIP_NH_OIF 0x08
39 __u32 flowic_secid; 39 __u32 flowic_secid;
40 struct flowi_tunnel flowic_tun_key; 40 struct flowi_tunnel flowic_tun_key;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index a9af1cc8c1bc..1b6b6dcb018d 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -183,9 +183,8 @@ _genl_register_family_with_ops_grps(struct genl_family *family,
183 (grps), ARRAY_SIZE(grps)) 183 (grps), ARRAY_SIZE(grps))
184 184
185int genl_unregister_family(struct genl_family *family); 185int genl_unregister_family(struct genl_family *family);
186void genl_notify(struct genl_family *family, 186void genl_notify(struct genl_family *family, struct sk_buff *skb,
187 struct sk_buff *skb, struct net *net, u32 portid, 187 struct genl_info *info, u32 group, gfp_t flags);
188 u32 group, struct nlmsghdr *nlh, gfp_t flags);
189 188
190struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info, 189struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info,
191 gfp_t flags); 190 gfp_t flags);
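
genl_notify() now takes the struct genl_info of the request that triggered the notification and derives the namespace, portid and nlmsghdr from it. A conversion sketch; my_family, msg and MY_MCGRP are placeholders.

static struct genl_family my_family;		/* placeholder family */
#define MY_MCGRP 0				/* placeholder multicast group index */

static void example_notify(struct sk_buff *msg, struct genl_info *info)
{
	/* old shape, per the removed declaration:
	 *   genl_notify(&my_family, msg, genl_info_net(info),
	 *               info->snd_portid, MY_MCGRP, info->nlhdr, GFP_KERNEL);
	 */
	genl_notify(&my_family, msg, info, MY_MCGRP, GFP_KERNEL);
}
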
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 2c10a9f0c6d9..a62a051a3a2f 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -50,15 +50,6 @@ struct ieee802154_sechdr {
50 }; 50 };
51}; 51};
52 52
53struct ieee802154_addr {
54 u8 mode;
55 __le16 pan_id;
56 union {
57 __le16 short_addr;
58 __le64 extended_addr;
59 };
60};
61
62struct ieee802154_hdr_fc { 53struct ieee802154_hdr_fc {
63#if defined(__LITTLE_ENDIAN_BITFIELD) 54#if defined(__LITTLE_ENDIAN_BITFIELD)
64 u16 type:3, 55 u16 type:3,
@@ -99,7 +90,7 @@ struct ieee802154_hdr {
99 * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame 90 * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
100 * version, if SECEN is set. 91 * version, if SECEN is set.
101 */ 92 */
102int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr); 93int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
103 94
104/* pulls the entire 802.15.4 header off of the skb, including the security 95/* pulls the entire 802.15.4 header off of the skb, including the security
105 * header, and performs pan id decompression 96 * header, and performs pan id decompression
@@ -243,38 +234,6 @@ static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
243 return mac_cb(skb); 234 return mac_cb(skb);
244} 235}
245 236
246#define IEEE802154_LLSEC_KEY_SIZE 16
247
248struct ieee802154_llsec_key_id {
249 u8 mode;
250 u8 id;
251 union {
252 struct ieee802154_addr device_addr;
253 __le32 short_source;
254 __le64 extended_source;
255 };
256};
257
258struct ieee802154_llsec_key {
259 u8 frame_types;
260 u32 cmd_frame_ids;
261 u8 key[IEEE802154_LLSEC_KEY_SIZE];
262};
263
264struct ieee802154_llsec_key_entry {
265 struct list_head list;
266
267 struct ieee802154_llsec_key_id id;
268 struct ieee802154_llsec_key *key;
269};
270
271struct ieee802154_llsec_device_key {
272 struct list_head list;
273
274 struct ieee802154_llsec_key_id key_id;
275 u32 frame_counter;
276};
277
278enum { 237enum {
279 IEEE802154_LLSEC_DEVKEY_IGNORE, 238 IEEE802154_LLSEC_DEVKEY_IGNORE,
280 IEEE802154_LLSEC_DEVKEY_RESTRICT, 239 IEEE802154_LLSEC_DEVKEY_RESTRICT,
@@ -283,49 +242,6 @@ enum {
283 __IEEE802154_LLSEC_DEVKEY_MAX, 242 __IEEE802154_LLSEC_DEVKEY_MAX,
284}; 243};
285 244
286struct ieee802154_llsec_device {
287 struct list_head list;
288
289 __le16 pan_id;
290 __le16 short_addr;
291 __le64 hwaddr;
292 u32 frame_counter;
293 bool seclevel_exempt;
294
295 u8 key_mode;
296 struct list_head keys;
297};
298
299struct ieee802154_llsec_seclevel {
300 struct list_head list;
301
302 u8 frame_type;
303 u8 cmd_frame_id;
304 bool device_override;
305 u32 sec_levels;
306};
307
308struct ieee802154_llsec_params {
309 bool enabled;
310
311 __be32 frame_counter;
312 u8 out_level;
313 struct ieee802154_llsec_key_id out_key;
314
315 __le64 default_key_source;
316
317 __le16 pan_id;
318 __le64 hwaddr;
319 __le64 coord_hwaddr;
320 __le16 coord_shortaddr;
321};
322
323struct ieee802154_llsec_table {
324 struct list_head keys;
325 struct list_head devices;
326 struct list_head security_levels;
327};
328
329#define IEEE802154_MAC_SCAN_ED 0 245#define IEEE802154_MAC_SCAN_ED 0
330#define IEEE802154_MAC_SCAN_ACTIVE 1 246#define IEEE802154_MAC_SCAN_ACTIVE 1
331#define IEEE802154_MAC_SCAN_PASSIVE 2 247#define IEEE802154_MAC_SCAN_PASSIVE 2
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 6d539e4e5ba7..064cfbe639d0 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -25,17 +25,8 @@ struct sockaddr;
25int inet6_csk_bind_conflict(const struct sock *sk, 25int inet6_csk_bind_conflict(const struct sock *sk,
26 const struct inet_bind_bucket *tb, bool relax); 26 const struct inet_bind_bucket *tb, bool relax);
27 27
28struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, 28struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
29 const struct request_sock *req); 29 const struct request_sock *req, u8 proto);
30
31struct request_sock *inet6_csk_search_req(struct sock *sk,
32 const __be16 rport,
33 const struct in6_addr *raddr,
34 const struct in6_addr *laddr,
35 const int iif);
36
37void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
38 const unsigned long timeout);
39 30
40void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 31void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
41 32
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 279f83591971..109e3ee9108c 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
41 41
42static inline void inet_ctl_sock_destroy(struct sock *sk) 42static inline void inet_ctl_sock_destroy(struct sock *sk)
43{ 43{
44 sock_release(sk->sk_socket); 44 if (sk)
45 sock_release(sk->sk_socket);
45} 46}
46 47
47#endif 48#endif
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0320bbb7d7b5..481fe1c9044c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -41,9 +41,11 @@ struct inet_connection_sock_af_ops {
41 int (*rebuild_header)(struct sock *sk); 41 int (*rebuild_header)(struct sock *sk);
42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); 42 void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
43 int (*conn_request)(struct sock *sk, struct sk_buff *skb); 43 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
44 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 44 struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
45 struct request_sock *req, 45 struct request_sock *req,
46 struct dst_entry *dst); 46 struct dst_entry *dst,
47 struct request_sock *req_unhash,
48 bool *own_req);
47 u16 net_header_len; 49 u16 net_header_len;
48 u16 net_frag_header_len; 50 u16 net_frag_header_len;
49 u16 sockaddr_len; 51 u16 sockaddr_len;
@@ -258,31 +260,25 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
258 260
259struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); 261struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
260 262
261struct request_sock *inet_csk_search_req(struct sock *sk,
262 const __be16 rport,
263 const __be32 raddr,
264 const __be32 laddr);
265int inet_csk_bind_conflict(const struct sock *sk, 263int inet_csk_bind_conflict(const struct sock *sk,
266 const struct inet_bind_bucket *tb, bool relax); 264 const struct inet_bind_bucket *tb, bool relax);
267int inet_csk_get_port(struct sock *sk, unsigned short snum); 265int inet_csk_get_port(struct sock *sk, unsigned short snum);
268 266
269struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, 267struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
270 const struct request_sock *req); 268 const struct request_sock *req);
271struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, 269struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
270 struct sock *newsk,
272 const struct request_sock *req); 271 const struct request_sock *req);
273 272
274static inline void inet_csk_reqsk_queue_add(struct sock *sk, 273void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
275 struct request_sock *req, 274 struct sock *child);
276 struct sock *child)
277{
278 reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
279}
280
281void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 275void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
282 unsigned long timeout); 276 unsigned long timeout);
277struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
278 struct request_sock *req,
279 bool own_req);
283 280
284static inline void inet_csk_reqsk_queue_added(struct sock *sk, 281static inline void inet_csk_reqsk_queue_added(struct sock *sk)
285 const unsigned long timeout)
286{ 282{
287 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); 283 reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
288} 284}
@@ -299,10 +295,11 @@ static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
299 295
300static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) 296static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
301{ 297{
302 return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); 298 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
303} 299}
304 300
305void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); 301void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
302void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
306 303
307void inet_csk_destroy_sock(struct sock *sk); 304void inet_csk_destroy_sock(struct sock *sk);
308void inet_csk_prepare_forced_close(struct sock *sk); 305void inet_csk_prepare_forced_close(struct sock *sk);
@@ -316,7 +313,7 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
316 (POLLIN | POLLRDNORM) : 0; 313 (POLLIN | POLLRDNORM) : 0;
317} 314}
318 315
319int inet_csk_listen_start(struct sock *sk, const int nr_table_entries); 316int inet_csk_listen_start(struct sock *sk, int backlog);
320void inet_csk_listen_stop(struct sock *sk); 317void inet_csk_listen_stop(struct sock *sk);
321 318
322void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 319void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
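
Among the inet_connection_sock.h changes, inet_csk_reqsk_queue_is_full() now compares the request queue length against sk->sk_max_ack_backlog instead of the old reqsk_queue_is_full() test, and the queue add/hashdance helpers move out of line. A hedged fragment of how a listener-side conn_request handler might use the check; the surrounding function is hypothetical.

/* Hypothetical conn_request fragment: refuse new request socks once the
 * per-listener limit is reached.
 */
static int example_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;		/* queue already at sk_max_ack_backlog */

	/* ... allocate the request sock, reply, then hash it ... */
	return 0;

drop:
	return 0;	/* the caller is assumed to free the skb in this sketch */
}
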
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 53eead2da743..ac42bbb37b2d 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -108,7 +108,15 @@ struct inet_frags {
108int inet_frags_init(struct inet_frags *); 108int inet_frags_init(struct inet_frags *);
109void inet_frags_fini(struct inet_frags *); 109void inet_frags_fini(struct inet_frags *);
110 110
111void inet_frags_init_net(struct netns_frags *nf); 111static inline int inet_frags_init_net(struct netns_frags *nf)
112{
113 return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
114}
115static inline void inet_frags_uninit_net(struct netns_frags *nf)
116{
117 percpu_counter_destroy(&nf->mem);
118}
119
112void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); 120void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
113 121
114void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); 122void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -154,11 +162,6 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch); 162 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
155} 163}
156 164
157static inline void init_frag_mem_limit(struct netns_frags *nf)
158{
159 percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
160}
161
162static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) 165static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
163{ 166{
164 unsigned int res; 167 unsigned int res;
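
inet_frags_init_net() becomes an inline returning the percpu_counter_init() result, with inet_frags_uninit_net() as the matching teardown, so per-netns setup must now handle failure. A sketch of the expected pattern; the surrounding function and the sysctl step are placeholders.

static int example_register_sysctls(struct netns_frags *nf);	/* placeholder */

static int example_frags_pernet_init(struct netns_frags *nf)
{
	int err = inet_frags_init_net(nf);	/* percpu counter allocation */

	if (err)
		return err;

	err = example_register_sysctls(nf);	/* placeholder follow-up step */
	if (err)
		inet_frags_uninit_net(nf);	/* unwind the counter on failure */
	return err;
}
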
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b07d126694a7..de2e3ade6102 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -199,13 +199,14 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
199} 199}
200 200
201/* Caller must disable local BH processing. */ 201/* Caller must disable local BH processing. */
202int __inet_inherit_port(struct sock *sk, struct sock *child); 202int __inet_inherit_port(const struct sock *sk, struct sock *child);
203 203
204void inet_put_port(struct sock *sk); 204void inet_put_port(struct sock *sk);
205 205
206void inet_hashinfo_init(struct inet_hashinfo *h); 206void inet_hashinfo_init(struct inet_hashinfo *h);
207 207
208void __inet_hash_nolisten(struct sock *sk, struct sock *osk); 208bool inet_ehash_insert(struct sock *sk, struct sock *osk);
209bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
209void __inet_hash(struct sock *sk, struct sock *osk); 210void __inet_hash(struct sock *sk, struct sock *osk);
210void inet_hash(struct sock *sk); 211void inet_hash(struct sock *sk);
211void inet_unhash(struct sock *sk); 212void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 47eb67b08abd..2134e6d815bc 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -210,6 +210,18 @@ struct inet_sock {
210#define IP_CMSG_ORIGDSTADDR BIT(6) 210#define IP_CMSG_ORIGDSTADDR BIT(6)
211#define IP_CMSG_CHECKSUM BIT(7) 211#define IP_CMSG_CHECKSUM BIT(7)
212 212
213/* SYNACK messages might be attached to request sockets.
214 * Some places want to reach the listener in this case.
215 */
216static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
217{
218 struct sock *sk = skb->sk;
219
220 if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
221 sk = inet_reqsk(sk)->rsk_listener;
222 return sk;
223}
224
213static inline struct inet_sock *inet_sk(const struct sock *sk) 225static inline struct inet_sock *inet_sk(const struct sock *sk)
214{ 226{
215 return (struct inet_sock *)sk; 227 return (struct inet_sock *)sk;
@@ -245,7 +257,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr,
245} 257}
246 258
247struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, 259struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
248 struct sock *sk_listener); 260 struct sock *sk_listener,
261 bool attach_listener);
249 262
250static inline __u8 inet_sk_flowi_flags(const struct sock *sk) 263static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
251{ 264{
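
skb_to_full_sk() deals with SYNACK packets whose skb->sk is a request sock in TCP_NEW_SYN_RECV: it returns the listener instead, so callers can read full-socket fields safely. A small hedged example:

/* Hypothetical egress hook that needs the socket mark: go through
 * skb_to_full_sk() so a request sock is translated to its listener.
 */
static u32 example_skb_mark(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	return sk ? sk->sk_mark : 0;	/* mark lives on the full socket */
}
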
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 186f3a1e1b1f..c9b3eb70f340 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -70,6 +70,7 @@ struct inet_timewait_sock {
70#define tw_dport __tw_common.skc_dport 70#define tw_dport __tw_common.skc_dport
71#define tw_num __tw_common.skc_num 71#define tw_num __tw_common.skc_num
72#define tw_cookie __tw_common.skc_cookie 72#define tw_cookie __tw_common.skc_cookie
73#define tw_dr __tw_common.skc_tw_dr
73 74
74 int tw_timeout; 75 int tw_timeout;
75 volatile unsigned char tw_substate; 76 volatile unsigned char tw_substate;
@@ -88,7 +89,6 @@ struct inet_timewait_sock {
88 kmemcheck_bitfield_end(flags); 89 kmemcheck_bitfield_end(flags);
89 struct timer_list tw_timer; 90 struct timer_list tw_timer;
90 struct inet_bind_bucket *tw_tb; 91 struct inet_bind_bucket *tw_tb;
91 struct inet_timewait_death_row *tw_dr;
92}; 92};
93#define tw_tclass tw_tos 93#define tw_tclass tw_tos
94 94
@@ -113,12 +113,12 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, 113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
114 bool rearm); 114 bool rearm);
115 115
116static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) 116static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
117{ 117{
118 __inet_twsk_schedule(tw, timeo, false); 118 __inet_twsk_schedule(tw, timeo, false);
119} 119}
120 120
121static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) 121static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
122{ 122{
123 __inet_twsk_schedule(tw, timeo, true); 123 __inet_twsk_schedule(tw, timeo, true);
124} 124}
diff --git a/include/net/ip.h b/include/net/ip.h
index 9b9ca2839399..1a98f1ca1638 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -100,24 +100,20 @@ int igmp_mc_init(void);
100 * Functions provided by ip.c 100 * Functions provided by ip.c
101 */ 101 */
102 102
103int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, 103int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
104 __be32 saddr, __be32 daddr, 104 __be32 saddr, __be32 daddr,
105 struct ip_options_rcu *opt); 105 struct ip_options_rcu *opt);
106int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, 106int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
107 struct net_device *orig_dev); 107 struct net_device *orig_dev);
108int ip_local_deliver(struct sk_buff *skb); 108int ip_local_deliver(struct sk_buff *skb);
109int ip_mr_input(struct sk_buff *skb); 109int ip_mr_input(struct sk_buff *skb);
110int ip_output(struct sock *sk, struct sk_buff *skb); 110int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
111int ip_mc_output(struct sock *sk, struct sk_buff *skb); 111int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
112int ip_do_fragment(struct sock *sk, struct sk_buff *skb, 112int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
113 int (*output)(struct sock *, struct sk_buff *)); 113 int (*output)(struct net *, struct sock *, struct sk_buff *));
114void ip_send_check(struct iphdr *ip); 114void ip_send_check(struct iphdr *ip);
115int __ip_local_out(struct sk_buff *skb); 115int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
116int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); 116int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
117static inline int ip_local_out(struct sk_buff *skb)
118{
119 return ip_local_out_sk(skb->sk, skb);
120}
121 117
122int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); 118int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
123void ip_init(void); 119void ip_init(void);
@@ -282,10 +278,12 @@ int ip_decrease_ttl(struct iphdr *iph)
282} 278}
283 279
284static inline 280static inline
285int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) 281int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
286{ 282{
287 return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || 283 u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
288 (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && 284
285 return pmtudisc == IP_PMTUDISC_DO ||
286 (pmtudisc == IP_PMTUDISC_WANT &&
289 !(dst_metric_locked(dst, RTAX_MTU))); 287 !(dst_metric_locked(dst, RTAX_MTU)));
290} 288}
291 289
@@ -321,12 +319,15 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
321 319
322static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 320static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
323{ 321{
324 if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { 322 struct sock *sk = skb->sk;
323
324 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
325 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 325 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
326
326 return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); 327 return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
327 } else {
328 return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
329 } 328 }
329
330 return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
330} 331}
331 332
332u32 ip_idents_reserve(u32 hash, int segs); 333u32 ip_idents_reserve(u32 hash, int segs);
@@ -505,11 +506,11 @@ static inline bool ip_defrag_user_in_between(u32 user,
505 return user >= lower_bond && user <= upper_bond; 506 return user >= lower_bond && user <= upper_bond;
506} 507}
507 508
508int ip_defrag(struct sk_buff *skb, u32 user); 509int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
509#ifdef CONFIG_INET 510#ifdef CONFIG_INET
510struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); 511struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
511#else 512#else
512static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) 513static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
513{ 514{
514 return skb; 515 return skb;
515} 516}
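
The ip.h hunks complete the explicit net/sk plumbing: ip_output(), ip_mc_output(), __ip_local_out() and ip_local_out() all take struct net, and the output callback handed to ip_do_fragment() gains the same two leading arguments. A conversion sketch; deriving the namespace from the route's device is an assumption about the call site.

/* Sketch of a converted transmit tail. */
static int example_ip_xmit(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);	/* assumed source of net */

	return ip_local_out(net, sk, skb);	/* was: ip_local_out(skb) */
}

/* Fragmentation callbacks follow the same three-argument shape. */
static int example_frag_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	return ip_output(net, sk, skb);
}
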
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index aaf9700fc9e5..fb961a576abe 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
167 167
168static inline u32 rt6_get_cookie(const struct rt6_info *rt) 168static inline u32 rt6_get_cookie(const struct rt6_info *rt)
169{ 169{
170 if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE)) 170 if (rt->rt6i_flags & RTF_PCPU ||
171 (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
171 rt = (struct rt6_info *)(rt->dst.from); 172 rt = (struct rt6_info *)(rt->dst.from);
172 173
173 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 174 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 297629aadb19..2bfb2ad2fab1 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -173,8 +173,8 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
173 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); 173 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
174} 174}
175 175
176int ip6_fragment(struct sock *sk, struct sk_buff *skb, 176int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
177 int (*output)(struct sock *, struct sk_buff *)); 177 int (*output)(struct net *, struct sock *, struct sk_buff *));
178 178
179static inline int ip6_skb_dst_mtu(struct sk_buff *skb) 179static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
180{ 180{
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fa915fa0f703..ff788b665277 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -87,14 +87,15 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
87 int pkt_len, err; 87 int pkt_len, err;
88 88
89 pkt_len = skb->len - skb_inner_network_offset(skb); 89 pkt_len = skb->len - skb_inner_network_offset(skb);
90 err = ip6_local_out_sk(sk, skb); 90 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
91 91
92 if (net_xmit_eval(err) == 0) { 92 if (net_xmit_eval(err) == 0) {
93 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 93 struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
94 u64_stats_update_begin(&tstats->syncp); 94 u64_stats_update_begin(&tstats->syncp);
95 tstats->tx_bytes += pkt_len; 95 tstats->tx_bytes += pkt_len;
96 tstats->tx_packets++; 96 tstats->tx_packets++;
97 u64_stats_update_end(&tstats->syncp); 97 u64_stats_update_end(&tstats->syncp);
98 put_cpu_ptr(tstats);
98 } else { 99 } else {
99 stats->tx_errors++; 100 stats->tx_errors++;
100 stats->tx_aborted_errors++; 101 stats->tx_aborted_errors++;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 727d6e9a9685..9f4df68105ab 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -79,7 +79,7 @@ struct fib_nh {
79 unsigned char nh_scope; 79 unsigned char nh_scope;
80#ifdef CONFIG_IP_ROUTE_MULTIPATH 80#ifdef CONFIG_IP_ROUTE_MULTIPATH
81 int nh_weight; 81 int nh_weight;
82 int nh_power; 82 atomic_t nh_upper_bound;
83#endif 83#endif
84#ifdef CONFIG_IP_ROUTE_CLASSID 84#ifdef CONFIG_IP_ROUTE_CLASSID
85 __u32 nh_tclassid; 85 __u32 nh_tclassid;
@@ -118,7 +118,7 @@ struct fib_info {
118#define fib_advmss fib_metrics[RTAX_ADVMSS-1] 118#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
119 int fib_nhs; 119 int fib_nhs;
120#ifdef CONFIG_IP_ROUTE_MULTIPATH 120#ifdef CONFIG_IP_ROUTE_MULTIPATH
121 int fib_power; 121 int fib_weight;
122#endif 122#endif
123 struct rcu_head rcu; 123 struct rcu_head rcu;
124 struct fib_nh fib_nh[0]; 124 struct fib_nh fib_nh[0];
@@ -317,10 +317,20 @@ void fib_flush_external(struct net *net);
317 317
318/* Exported by fib_semantics.c */ 318/* Exported by fib_semantics.c */
319int ip_fib_check_default(__be32 gw, struct net_device *dev); 319int ip_fib_check_default(__be32 gw, struct net_device *dev);
320int fib_sync_down_dev(struct net_device *dev, unsigned long event); 320int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
321int fib_sync_down_addr(struct net *net, __be32 local); 321int fib_sync_down_addr(struct net *net, __be32 local);
322int fib_sync_up(struct net_device *dev, unsigned int nh_flags); 322int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
323void fib_select_multipath(struct fib_result *res); 323
324extern u32 fib_multipath_secret __read_mostly;
325
326static inline int fib_multipath_hash(__be32 saddr, __be32 daddr)
327{
328 return jhash_2words(saddr, daddr, fib_multipath_secret) >> 1;
329}
330
331void fib_select_multipath(struct fib_result *res, int hash);
332void fib_select_path(struct net *net, struct fib_result *res,
333 struct flowi4 *fl4, int mp_hash);
324 334
325/* Exported by fib_trie.c */ 335/* Exported by fib_trie.c */
326void fib_trie_init(void); 336void fib_trie_init(void);
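
The ip_fib.h hunk switches multipath selection from the nh_power/fib_power weighting to a hash: fib_multipath_hash() folds the addresses with a global secret, fib_select_multipath() consumes that hash, and fib_select_path() is the higher-level entry point. A hedged sketch (CONFIG_IP_ROUTE_MULTIPATH assumed):

/* Hypothetical route-selection fragment: hash the flow addresses and let
 * the FIB pick the corresponding next hop.
 */
static void example_pick_nexthop(struct net *net, struct fib_result *res,
				 struct flowi4 *fl4)
{
	int hash = fib_multipath_hash(fl4->saddr, fl4->daddr);

	fib_select_multipath(res, hash);
	/* or, one level up: fib_select_path(net, res, fl4, hash); */
}
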
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f6dafec9102c..62a750a6a8f8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
287 struct pcpu_sw_netstats __percpu *stats) 287 struct pcpu_sw_netstats __percpu *stats)
288{ 288{
289 if (err > 0) { 289 if (err > 0) {
290 struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); 290 struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
291 291
292 u64_stats_update_begin(&tstats->syncp); 292 u64_stats_update_begin(&tstats->syncp);
293 tstats->tx_bytes += err; 293 tstats->tx_bytes += err;
294 tstats->tx_packets++; 294 tstats->tx_packets++;
295 u64_stats_update_end(&tstats->syncp); 295 u64_stats_update_end(&tstats->syncp);
296 put_cpu_ptr(tstats);
296 } else if (err < 0) { 297 } else if (err < 0) {
297 err_stats->tx_errors++; 298 err_stats->tx_errors++;
298 err_stats->tx_aborted_errors++; 299 err_stats->tx_aborted_errors++;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9b9ca87a4210..0816c872b689 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -29,65 +29,15 @@
29#endif 29#endif
30#include <net/net_namespace.h> /* Netw namespace */ 30#include <net/net_namespace.h> /* Netw namespace */
31 31
32#define IP_VS_HDR_INVERSE 1
33#define IP_VS_HDR_ICMP 2
34
32/* Generic access of ipvs struct */ 35/* Generic access of ipvs struct */
33static inline struct netns_ipvs *net_ipvs(struct net* net) 36static inline struct netns_ipvs *net_ipvs(struct net* net)
34{ 37{
35 return net->ipvs; 38 return net->ipvs;
36} 39}
37 40
38/* Get net ptr from skb in traffic cases
39 * use skb_sknet when call is from userland (ioctl or netlink)
40 */
41static inline struct net *skb_net(const struct sk_buff *skb)
42{
43#ifdef CONFIG_NET_NS
44#ifdef CONFIG_IP_VS_DEBUG
45 /*
46 * This is used for debug only.
47 * Start with the most likely hit
48 * End with BUG
49 */
50 if (likely(skb->dev && dev_net(skb->dev)))
51 return dev_net(skb->dev);
52 if (skb_dst(skb) && skb_dst(skb)->dev)
53 return dev_net(skb_dst(skb)->dev);
54 WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
55 __func__, __LINE__);
56 if (likely(skb->sk && sock_net(skb->sk)))
57 return sock_net(skb->sk);
58 pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
59 __func__, __LINE__);
60 BUG();
61#else
62 return dev_net(skb->dev ? : skb_dst(skb)->dev);
63#endif
64#else
65 return &init_net;
66#endif
67}
68
69static inline struct net *skb_sknet(const struct sk_buff *skb)
70{
71#ifdef CONFIG_NET_NS
72#ifdef CONFIG_IP_VS_DEBUG
73 /* Start with the most likely hit */
74 if (likely(skb->sk && sock_net(skb->sk)))
75 return sock_net(skb->sk);
76 WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
77 __func__, __LINE__);
78 if (likely(skb->dev && dev_net(skb->dev)))
79 return dev_net(skb->dev);
80 pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
81 __func__, __LINE__);
82 BUG();
83#else
84 return sock_net(skb->sk);
85#endif
86#else
87 return &init_net;
88#endif
89}
90
91/* This one needed for single_open_net since net is stored directly in 41/* This one needed for single_open_net since net is stored directly in
92 * private not as a struct i.e. seq_file_net can't be used. 42 * private not as a struct i.e. seq_file_net can't be used.
93 */ 43 */
@@ -104,6 +54,8 @@ static inline struct net *seq_file_single_net(struct seq_file *seq)
104extern int ip_vs_conn_tab_size; 54extern int ip_vs_conn_tab_size;
105 55
106struct ip_vs_iphdr { 56struct ip_vs_iphdr {
57 int hdr_flags; /* ipvs flags */
58 __u32 off; /* Where IP or IPv4 header starts */
107 __u32 len; /* IPv4 simply where L4 starts 59 __u32 len; /* IPv4 simply where L4 starts
108 * IPv6 where L4 Transport Header starts */ 60 * IPv6 where L4 Transport Header starts */
109 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ 61 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
@@ -120,48 +72,89 @@ static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
120 return skb_header_pointer(skb, offset, len, buffer); 72 return skb_header_pointer(skb, offset, len, buffer);
121} 73}
122 74
123static inline void
124ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
125{
126 const struct iphdr *iph = nh;
127
128 iphdr->len = iph->ihl * 4;
129 iphdr->fragoffs = 0;
130 iphdr->protocol = iph->protocol;
131 iphdr->saddr.ip = iph->saddr;
132 iphdr->daddr.ip = iph->daddr;
133}
134
135/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6. 75/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
136 * IPv6 requires some extra work, as finding proper header position, 76 * IPv6 requires some extra work, as finding proper header position,
137 * depend on the IPv6 extension headers. 77 * depend on the IPv6 extension headers.
138 */ 78 */
139static inline void 79static inline int
140ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr) 80ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
81 int hdr_flags, struct ip_vs_iphdr *iphdr)
141{ 82{
83 iphdr->hdr_flags = hdr_flags;
84 iphdr->off = offset;
85
142#ifdef CONFIG_IP_VS_IPV6 86#ifdef CONFIG_IP_VS_IPV6
143 if (af == AF_INET6) { 87 if (af == AF_INET6) {
144 const struct ipv6hdr *iph = 88 struct ipv6hdr _iph;
145 (struct ipv6hdr *)skb_network_header(skb); 89 const struct ipv6hdr *iph = skb_header_pointer(
90 skb, offset, sizeof(_iph), &_iph);
91 if (!iph)
92 return 0;
93
146 iphdr->saddr.in6 = iph->saddr; 94 iphdr->saddr.in6 = iph->saddr;
147 iphdr->daddr.in6 = iph->daddr; 95 iphdr->daddr.in6 = iph->daddr;
148 /* ipv6_find_hdr() updates len, flags */ 96 /* ipv6_find_hdr() updates len, flags */
149 iphdr->len = 0; 97 iphdr->len = offset;
150 iphdr->flags = 0; 98 iphdr->flags = 0;
151 iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1, 99 iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
152 &iphdr->fragoffs, 100 &iphdr->fragoffs,
153 &iphdr->flags); 101 &iphdr->flags);
102 if (iphdr->protocol < 0)
103 return 0;
154 } else 104 } else
155#endif 105#endif
156 { 106 {
157 const struct iphdr *iph = 107 struct iphdr _iph;
158 (struct iphdr *)skb_network_header(skb); 108 const struct iphdr *iph = skb_header_pointer(
159 iphdr->len = iph->ihl * 4; 109 skb, offset, sizeof(_iph), &_iph);
110 if (!iph)
111 return 0;
112
113 iphdr->len = offset + iph->ihl * 4;
160 iphdr->fragoffs = 0; 114 iphdr->fragoffs = 0;
161 iphdr->protocol = iph->protocol; 115 iphdr->protocol = iph->protocol;
162 iphdr->saddr.ip = iph->saddr; 116 iphdr->saddr.ip = iph->saddr;
163 iphdr->daddr.ip = iph->daddr; 117 iphdr->daddr.ip = iph->daddr;
164 } 118 }
119
120 return 1;
121}
122
123static inline int
124ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
125 bool inverse, struct ip_vs_iphdr *iphdr)
126{
127 int hdr_flags = IP_VS_HDR_ICMP;
128
129 if (inverse)
130 hdr_flags |= IP_VS_HDR_INVERSE;
131
132 return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
133}
134
135static inline int
136ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
137 struct ip_vs_iphdr *iphdr)
138{
139 int hdr_flags = 0;
140
141 if (inverse)
142 hdr_flags |= IP_VS_HDR_INVERSE;
143
144 return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
145 hdr_flags, iphdr);
146}
147
148static inline bool
149ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
150{
151 return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
152}
153
154static inline bool
155ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
156{
157 return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
165} 158}
166 159
167static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, 160static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
@@ -437,26 +430,27 @@ struct ip_vs_protocol {
437 430
438 void (*exit)(struct ip_vs_protocol *pp); 431 void (*exit)(struct ip_vs_protocol *pp);
439 432
440 int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); 433 int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
441 434
442 void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); 435 void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
443 436
444 int (*conn_schedule)(int af, struct sk_buff *skb, 437 int (*conn_schedule)(struct netns_ipvs *ipvs,
438 int af, struct sk_buff *skb,
445 struct ip_vs_proto_data *pd, 439 struct ip_vs_proto_data *pd,
446 int *verdict, struct ip_vs_conn **cpp, 440 int *verdict, struct ip_vs_conn **cpp,
447 struct ip_vs_iphdr *iph); 441 struct ip_vs_iphdr *iph);
448 442
449 struct ip_vs_conn * 443 struct ip_vs_conn *
450 (*conn_in_get)(int af, 444 (*conn_in_get)(struct netns_ipvs *ipvs,
445 int af,
451 const struct sk_buff *skb, 446 const struct sk_buff *skb,
452 const struct ip_vs_iphdr *iph, 447 const struct ip_vs_iphdr *iph);
453 int inverse);
454 448
455 struct ip_vs_conn * 449 struct ip_vs_conn *
456 (*conn_out_get)(int af, 450 (*conn_out_get)(struct netns_ipvs *ipvs,
451 int af,
457 const struct sk_buff *skb, 452 const struct sk_buff *skb,
458 const struct ip_vs_iphdr *iph, 453 const struct ip_vs_iphdr *iph);
459 int inverse);
460 454
461 int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, 455 int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
462 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); 456 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);
@@ -473,9 +467,9 @@ struct ip_vs_protocol {
473 const struct sk_buff *skb, 467 const struct sk_buff *skb,
474 struct ip_vs_proto_data *pd); 468 struct ip_vs_proto_data *pd);
475 469
476 int (*register_app)(struct net *net, struct ip_vs_app *inc); 470 int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
477 471
478 void (*unregister_app)(struct net *net, struct ip_vs_app *inc); 472 void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
479 473
480 int (*app_conn_bind)(struct ip_vs_conn *cp); 474 int (*app_conn_bind)(struct ip_vs_conn *cp);
481 475
@@ -497,11 +491,11 @@ struct ip_vs_proto_data {
497}; 491};
498 492
499struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto); 493struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
500struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net, 494struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
501 unsigned short proto); 495 unsigned short proto);
502 496
503struct ip_vs_conn_param { 497struct ip_vs_conn_param {
504 struct net *net; 498 struct netns_ipvs *ipvs;
505 const union nf_inet_addr *caddr; 499 const union nf_inet_addr *caddr;
506 const union nf_inet_addr *vaddr; 500 const union nf_inet_addr *vaddr;
507 __be16 cport; 501 __be16 cport;
@@ -528,9 +522,7 @@ struct ip_vs_conn {
528 volatile __u32 flags; /* status flags */ 522 volatile __u32 flags; /* status flags */
529 __u16 protocol; /* Which protocol (TCP/UDP) */ 523 __u16 protocol; /* Which protocol (TCP/UDP) */
530 __u16 daf; /* Address family of the dest */ 524 __u16 daf; /* Address family of the dest */
531#ifdef CONFIG_NET_NS 525 struct netns_ipvs *ipvs;
532 struct net *net; /* Name space */
533#endif
534 526
535 /* counter and timer */ 527 /* counter and timer */
536 atomic_t refcnt; /* reference count */ 528 atomic_t refcnt; /* reference count */
@@ -577,33 +569,6 @@ struct ip_vs_conn {
577 struct rcu_head rcu_head; 569 struct rcu_head rcu_head;
578}; 570};
579 571
580/* To save some memory in conn table when name space is disabled. */
581static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
582{
583#ifdef CONFIG_NET_NS
584 return cp->net;
585#else
586 return &init_net;
587#endif
588}
589
590static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
591{
592#ifdef CONFIG_NET_NS
593 cp->net = net;
594#endif
595}
596
597static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
598 struct net *net)
599{
600#ifdef CONFIG_NET_NS
601 return cp->net == net;
602#else
603 return 1;
604#endif
605}
606
607/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user 572/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
608 * for IPv6 support. 573 * for IPv6 support.
609 * 574 *
@@ -663,7 +628,7 @@ struct ip_vs_service {
663 unsigned int flags; /* service status flags */ 628 unsigned int flags; /* service status flags */
664 unsigned int timeout; /* persistent timeout in ticks */ 629 unsigned int timeout; /* persistent timeout in ticks */
665 __be32 netmask; /* grouping granularity, mask/plen */ 630 __be32 netmask; /* grouping granularity, mask/plen */
666 struct net *net; 631 struct netns_ipvs *ipvs;
667 632
668 struct list_head destinations; /* real server d-linked list */ 633 struct list_head destinations; /* real server d-linked list */
669 __u32 num_dests; /* number of servers */ 634 __u32 num_dests; /* number of servers */
@@ -953,6 +918,8 @@ struct netns_ipvs {
953 int sysctl_pmtu_disc; 918 int sysctl_pmtu_disc;
954 int sysctl_backup_only; 919 int sysctl_backup_only;
955 int sysctl_conn_reuse_mode; 920 int sysctl_conn_reuse_mode;
921 int sysctl_schedule_icmp;
922 int sysctl_ignore_tunneled;
956 923
957 /* ip_vs_lblc */ 924 /* ip_vs_lblc */
958 int sysctl_lblc_expiration; 925 int sysctl_lblc_expiration;
@@ -1071,6 +1038,21 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
1071 return ipvs->sysctl_conn_reuse_mode; 1038 return ipvs->sysctl_conn_reuse_mode;
1072} 1039}
1073 1040
1041static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
1042{
1043 return ipvs->sysctl_schedule_icmp;
1044}
1045
1046static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
1047{
1048 return ipvs->sysctl_ignore_tunneled;
1049}
1050
1051static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
1052{
1053 return ipvs->sysctl_cache_bypass;
1054}
1055
1074#else 1056#else
1075 1057
1076static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) 1058static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1143,6 +1125,21 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
1143 return 1; 1125 return 1;
1144} 1126}
1145 1127
1128static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
1129{
1130 return 0;
1131}
1132
1133static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
1134{
1135 return 0;
1136}
1137
1138static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
1139{
1140 return 0;
1141}
1142
1146#endif 1143#endif
1147 1144
1148/* IPVS core functions 1145/* IPVS core functions
@@ -1164,14 +1161,14 @@ enum {
1164 IP_VS_DIR_LAST, 1161 IP_VS_DIR_LAST,
1165}; 1162};
1166 1163
1167static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol, 1164static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
1168 const union nf_inet_addr *caddr, 1165 const union nf_inet_addr *caddr,
1169 __be16 cport, 1166 __be16 cport,
1170 const union nf_inet_addr *vaddr, 1167 const union nf_inet_addr *vaddr,
1171 __be16 vport, 1168 __be16 vport,
1172 struct ip_vs_conn_param *p) 1169 struct ip_vs_conn_param *p)
1173{ 1170{
1174 p->net = net; 1171 p->ipvs = ipvs;
1175 p->af = af; 1172 p->af = af;
1176 p->protocol = protocol; 1173 p->protocol = protocol;
1177 p->caddr = caddr; 1174 p->caddr = caddr;
@@ -1185,15 +1182,15 @@ static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
1185struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); 1182struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
1186struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); 1183struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
1187 1184
1188struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 1185struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
1189 const struct ip_vs_iphdr *iph, 1186 const struct sk_buff *skb,
1190 int inverse); 1187 const struct ip_vs_iphdr *iph);
1191 1188
1192struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); 1189struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
1193 1190
1194struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 1191struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
1195 const struct ip_vs_iphdr *iph, 1192 const struct sk_buff *skb,
1196 int inverse); 1193 const struct ip_vs_iphdr *iph);
1197 1194
1198/* Get reference to gain full access to conn. 1195/* Get reference to gain full access to conn.
1199 * By default, RCU read-side critical sections have access only to 1196 * By default, RCU read-side critical sections have access only to
@@ -1221,9 +1218,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1221 1218
1222const char *ip_vs_state_name(__u16 proto, int state); 1219const char *ip_vs_state_name(__u16 proto, int state);
1223 1220
1224void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp); 1221void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
1225int ip_vs_check_template(struct ip_vs_conn *ct); 1222int ip_vs_check_template(struct ip_vs_conn *ct);
1226void ip_vs_random_dropentry(struct net *net); 1223void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
1227int ip_vs_conn_init(void); 1224int ip_vs_conn_init(void);
1228void ip_vs_conn_cleanup(void); 1225void ip_vs_conn_cleanup(void);
1229 1226
@@ -1288,29 +1285,29 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1288} 1285}
1289 1286
1290/* IPVS netns init & cleanup functions */ 1287/* IPVS netns init & cleanup functions */
1291int ip_vs_estimator_net_init(struct net *net); 1288int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
1292int ip_vs_control_net_init(struct net *net); 1289int ip_vs_control_net_init(struct netns_ipvs *ipvs);
1293int ip_vs_protocol_net_init(struct net *net); 1290int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
1294int ip_vs_app_net_init(struct net *net); 1291int ip_vs_app_net_init(struct netns_ipvs *ipvs);
1295int ip_vs_conn_net_init(struct net *net); 1292int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
1296int ip_vs_sync_net_init(struct net *net); 1293int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
1297void ip_vs_conn_net_cleanup(struct net *net); 1294void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
1298void ip_vs_app_net_cleanup(struct net *net); 1295void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
1299void ip_vs_protocol_net_cleanup(struct net *net); 1296void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
1300void ip_vs_control_net_cleanup(struct net *net); 1297void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
1301void ip_vs_estimator_net_cleanup(struct net *net); 1298void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
1302void ip_vs_sync_net_cleanup(struct net *net); 1299void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
1303void ip_vs_service_net_cleanup(struct net *net); 1300void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);
1304 1301
1305/* IPVS application functions 1302/* IPVS application functions
1306 * (from ip_vs_app.c) 1303 * (from ip_vs_app.c)
1307 */ 1304 */
1308#define IP_VS_APP_MAX_PORTS 8 1305#define IP_VS_APP_MAX_PORTS 8
1309struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); 1306struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
1310void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app); 1307void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
1311int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1308int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
1312void ip_vs_unbind_app(struct ip_vs_conn *cp); 1309void ip_vs_unbind_app(struct ip_vs_conn *cp);
1313int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, 1310int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
1314 __u16 port); 1311 __u16 port);
1315int ip_vs_app_inc_get(struct ip_vs_app *inc); 1312int ip_vs_app_inc_get(struct ip_vs_app *inc);
1316void ip_vs_app_inc_put(struct ip_vs_app *inc); 1313void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -1375,10 +1372,10 @@ extern struct ip_vs_stats ip_vs_stats;
1375extern int sysctl_ip_vs_sync_ver; 1372extern int sysctl_ip_vs_sync_ver;
1376 1373
1377struct ip_vs_service * 1374struct ip_vs_service *
1378ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol, 1375ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
1379 const union nf_inet_addr *vaddr, __be16 vport); 1376 const union nf_inet_addr *vaddr, __be16 vport);
1380 1377
1381bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol, 1378bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
1382 const union nf_inet_addr *daddr, __be16 dport); 1379 const union nf_inet_addr *daddr, __be16 dport);
1383 1380
1384int ip_vs_use_count_inc(void); 1381int ip_vs_use_count_inc(void);
@@ -1388,7 +1385,7 @@ void ip_vs_unregister_nl_ioctl(void);
1388int ip_vs_control_init(void); 1385int ip_vs_control_init(void);
1389void ip_vs_control_cleanup(void); 1386void ip_vs_control_cleanup(void);
1390struct ip_vs_dest * 1387struct ip_vs_dest *
1391ip_vs_find_dest(struct net *net, int svc_af, int dest_af, 1388ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
1392 const union nf_inet_addr *daddr, __be16 dport, 1389 const union nf_inet_addr *daddr, __be16 dport,
1393 const union nf_inet_addr *vaddr, __be16 vport, 1390 const union nf_inet_addr *vaddr, __be16 vport,
1394 __u16 protocol, __u32 fwmark, __u32 flags); 1391 __u16 protocol, __u32 fwmark, __u32 flags);
@@ -1414,14 +1411,14 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
1414/* IPVS sync daemon data and function prototypes 1411/* IPVS sync daemon data and function prototypes
1415 * (from ip_vs_sync.c) 1412 * (from ip_vs_sync.c)
1416 */ 1413 */
1417int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg, 1414int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
1418 int state); 1415 int state);
1419int stop_sync_thread(struct net *net, int state); 1416int stop_sync_thread(struct netns_ipvs *ipvs, int state);
1420void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); 1417void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);
1421 1418
1422/* IPVS rate estimator prototypes (from ip_vs_est.c) */ 1419/* IPVS rate estimator prototypes (from ip_vs_est.c) */
1423void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); 1420void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
1424void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); 1421void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
1425void ip_vs_zero_estimator(struct ip_vs_stats *stats); 1422void ip_vs_zero_estimator(struct ip_vs_stats *stats);
1426void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats); 1423void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
1427 1424
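The ip_vs.h hunks above move the whole IPVS API from struct net to struct netns_ipvs, so callers resolve the per-namespace IPVS state once and hand it to every helper. A minimal sketch of the adjusted caller pattern, assuming the net_ipvs() accessor that ip_vs.h provides for reaching the per-netns state; the function name and lookup parameters below are made up for illustration:

#include <linux/in.h>
#include <net/ip_vs.h>

static bool tcp_vip_is_configured(struct net *net, int af,
                                  const union nf_inet_addr *vaddr, __be16 vport)
{
        struct netns_ipvs *ipvs = net_ipvs(net);  /* resolve once, pass everywhere */
        struct ip_vs_service *svc;

        rcu_read_lock();
        /* fwmark 0: match on protocol/address/port rather than a firewall mark */
        svc = ip_vs_service_find(ipvs, af, 0, IPPROTO_TCP, vaddr, vport);
        rcu_read_unlock();

        return svc != NULL;
}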
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 711cca428cc8..e1a10b0ac0b0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -807,12 +807,12 @@ static inline u8 ip6_tclass(__be32 flowinfo)
807int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, 807int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
808 struct packet_type *pt, struct net_device *orig_dev); 808 struct packet_type *pt, struct net_device *orig_dev);
809 809
810int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb); 810int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
811 811
812/* 812/*
813 * upper-layer output functions 813 * upper-layer output functions
814 */ 814 */
815int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 815int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
816 struct ipv6_txoptions *opt, int tclass); 816 struct ipv6_txoptions *opt, int tclass);
817 817
818int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); 818int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
@@ -849,7 +849,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
849 849
850int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, 850int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
851 struct flowi6 *fl6); 851 struct flowi6 *fl6);
852struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 852struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
853 const struct in6_addr *final_dst); 853 const struct in6_addr *final_dst);
854struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 854struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
855 const struct in6_addr *final_dst); 855 const struct in6_addr *final_dst);
@@ -860,14 +860,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net,
860 * skb processing functions 860 * skb processing functions
861 */ 861 */
862 862
863int ip6_output(struct sock *sk, struct sk_buff *skb); 863int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
864int ip6_forward(struct sk_buff *skb); 864int ip6_forward(struct sk_buff *skb);
865int ip6_input(struct sk_buff *skb); 865int ip6_input(struct sk_buff *skb);
866int ip6_mc_input(struct sk_buff *skb); 866int ip6_mc_input(struct sk_buff *skb);
867 867
868int __ip6_local_out(struct sk_buff *skb); 868int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
869int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb); 869int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
870int ip6_local_out(struct sk_buff *skb);
871 870
872/* 871/*
873 * Extension header (options) processing 872 * Extension header (options) processing
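The ipv6.h hunk above gives the output-path entry points an explicit struct net argument (and drops ip6_local_out_sk), so they now share the (net, sk, skb) shape used by the rest of the output hooks. A rough sketch of an updated caller, assuming the packet already carries a dst; the helper name is invented:

#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ipv6.h>

static int xmit_prepared_ipv6(struct sk_buff *skb)
{
        /* derive the namespace explicitly instead of relying on the socket */
        struct net *net = dev_net(skb_dst(skb)->dev);

        return ip6_local_out(net, skb->sk, skb);
}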
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 0894ced31957..b867b0cf79e8 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -141,14 +141,14 @@ struct iucv_handler {
141 * called is the order of the registration of the iucv handlers 141 * called is the order of the registration of the iucv handlers
142 * to the base code. 142 * to the base code.
143 */ 143 */
144 int (*path_pending)(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); 144 int (*path_pending)(struct iucv_path *, u8 *ipvmid, u8 *ipuser);
145 /* 145 /*
146 * The path_complete function is called after an iucv interrupt 146 * The path_complete function is called after an iucv interrupt
147 * type 0x02 has been received for a path that has been established 147 * type 0x02 has been received for a path that has been established
148 * for this handler with iucv_path_connect and got accepted by the 148 * for this handler with iucv_path_connect and got accepted by the
149 * peer with iucv_path_accept. 149 * peer with iucv_path_accept.
150 */ 150 */
151 void (*path_complete)(struct iucv_path *, u8 ipuser[16]); 151 void (*path_complete)(struct iucv_path *, u8 *ipuser);
152 /* 152 /*
153 * The path_severed function is called after an iucv interrupt 153 * The path_severed function is called after an iucv interrupt
154 * type 0x03 has been received. The communication peer shutdown 154 * type 0x03 has been received. The communication peer shutdown
@@ -156,20 +156,20 @@ struct iucv_handler {
156 * remaining messages can be received until a iucv_path_sever 156 * remaining messages can be received until a iucv_path_sever
157 * shuts down the other end of the path as well. 157 * shuts down the other end of the path as well.
158 */ 158 */
159 void (*path_severed)(struct iucv_path *, u8 ipuser[16]); 159 void (*path_severed)(struct iucv_path *, u8 *ipuser);
160 /* 160 /*
161 * The path_quiesced function is called after an icuv interrupt 161 * The path_quiesced function is called after an icuv interrupt
162 * type 0x04 has been received. The communication peer has quiesced 162 * type 0x04 has been received. The communication peer has quiesced
163 * the path. Delivery of messages is stopped until iucv_path_resume 163 * the path. Delivery of messages is stopped until iucv_path_resume
164 * has been called. 164 * has been called.
165 */ 165 */
166 void (*path_quiesced)(struct iucv_path *, u8 ipuser[16]); 166 void (*path_quiesced)(struct iucv_path *, u8 *ipuser);
167 /* 167 /*
168 * The path_resumed function is called after an icuv interrupt 168 * The path_resumed function is called after an icuv interrupt
169 * type 0x05 has been received. The communication peer has resumed 169 * type 0x05 has been received. The communication peer has resumed
170 * the path. 170 * the path.
171 */ 171 */
172 void (*path_resumed)(struct iucv_path *, u8 ipuser[16]); 172 void (*path_resumed)(struct iucv_path *, u8 *ipuser);
173 /* 173 /*
174 * The message_pending function is called after an icuv interrupt 174 * The message_pending function is called after an icuv interrupt
175 * type 0x06 or type 0x07 has been received. A new message is 175 * type 0x06 or type 0x07 has been received. A new message is
@@ -256,7 +256,7 @@ static inline void iucv_path_free(struct iucv_path *path)
256 * Returns the result of the CP IUCV call. 256 * Returns the result of the CP IUCV call.
257 */ 257 */
258int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, 258int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
259 u8 userdata[16], void *private); 259 u8 *userdata, void *private);
260 260
261/** 261/**
262 * iucv_path_connect 262 * iucv_path_connect
@@ -274,7 +274,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
274 * Returns the result of the CP IUCV call. 274 * Returns the result of the CP IUCV call.
275 */ 275 */
276int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, 276int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
277 u8 userid[8], u8 system[8], u8 userdata[16], 277 u8 *userid, u8 *system, u8 *userdata,
278 void *private); 278 void *private);
279 279
280/** 280/**
@@ -287,7 +287,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
287 * 287 *
288 * Returns the result from the CP IUCV call. 288 * Returns the result from the CP IUCV call.
289 */ 289 */
290int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]); 290int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
291 291
292/** 292/**
293 * iucv_path_resume: 293 * iucv_path_resume:
@@ -299,7 +299,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]);
299 * 299 *
300 * Returns the result from the CP IUCV call. 300 * Returns the result from the CP IUCV call.
301 */ 301 */
302int iucv_path_resume(struct iucv_path *path, u8 userdata[16]); 302int iucv_path_resume(struct iucv_path *path, u8 *userdata);
303 303
304/** 304/**
305 * iucv_path_sever 305 * iucv_path_sever
@@ -310,7 +310,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16]);
310 * 310 *
311 * Returns the result from the CP IUCV call. 311 * Returns the result from the CP IUCV call.
312 */ 312 */
313int iucv_path_sever(struct iucv_path *path, u8 userdata[16]); 313int iucv_path_sever(struct iucv_path *path, u8 *userdata);
314 314
315/** 315/**
316 * iucv_message_purge 316 * iucv_message_purge
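The iucv.h hunk above only relaxes the prototypes from fixed-size array parameters to plain pointers; because array parameters decay to pointers in C, existing callers keep compiling unchanged. A small sketch, with an invented helper name:

#include <net/iucv/iucv.h>

static int quiesce_path(struct iucv_path *path)
{
        u8 user_data[16] = "LINUX";     /* still a 16-byte user data block */

        /* identical call against either the old or the new prototype */
        return iucv_path_quiesce(path, user_data);
}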
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
new file mode 100644
index 000000000000..774d85b2d5d9
--- /dev/null
+++ b/include/net/l3mdev.h
@@ -0,0 +1,222 @@
1/*
2 * include/net/l3mdev.h - L3 master device API
3 * Copyright (c) 2015 Cumulus Networks
4 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef _NET_L3MDEV_H_
12#define _NET_L3MDEV_H_
13
14/**
15 * struct l3mdev_ops - l3mdev operations
16 *
17 * @l3mdev_fib_table: Get FIB table id to use for lookups
18 *
19 * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
20 *
21 * @l3mdev_get_saddr: Get source address for a flow
22 *
23 * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
24 */
25
26struct l3mdev_ops {
27 u32 (*l3mdev_fib_table)(const struct net_device *dev);
28
29 /* IPv4 ops */
30 struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
31 const struct flowi4 *fl4);
32 void (*l3mdev_get_saddr)(struct net_device *dev,
33 struct flowi4 *fl4);
34
35 /* IPv6 ops */
36 struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
37 const struct flowi6 *fl6);
38};
39
40#ifdef CONFIG_NET_L3_MASTER_DEV
41
42int l3mdev_master_ifindex_rcu(struct net_device *dev);
43static inline int l3mdev_master_ifindex(struct net_device *dev)
44{
45 int ifindex;
46
47 rcu_read_lock();
48 ifindex = l3mdev_master_ifindex_rcu(dev);
49 rcu_read_unlock();
50
51 return ifindex;
52}
53
54/* get index of an interface to use for FIB lookups. For devices
55 * enslaved to an L3 master device FIB lookups are based on the
56 * master index
57 */
58static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
59{
60 return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
61}
62
63static inline int l3mdev_fib_oif(struct net_device *dev)
64{
65 int oif;
66
67 rcu_read_lock();
68 oif = l3mdev_fib_oif_rcu(dev);
69 rcu_read_unlock();
70
71 return oif;
72}
73
74u32 l3mdev_fib_table_rcu(const struct net_device *dev);
75u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
76static inline u32 l3mdev_fib_table(const struct net_device *dev)
77{
78 u32 tb_id;
79
80 rcu_read_lock();
81 tb_id = l3mdev_fib_table_rcu(dev);
82 rcu_read_unlock();
83
84 return tb_id;
85}
86
87static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
88 const struct flowi4 *fl4)
89{
90 if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
91 return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
92
93 return NULL;
94}
95
96static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
97{
98 struct net_device *dev;
99 bool rc = false;
100
101 if (ifindex == 0)
102 return false;
103
104 rcu_read_lock();
105
106 dev = dev_get_by_index_rcu(net, ifindex);
107 if (dev)
108 rc = netif_is_l3_master(dev);
109
110 rcu_read_unlock();
111
112 return rc;
113}
114
115static inline void l3mdev_get_saddr(struct net *net, int ifindex,
116 struct flowi4 *fl4)
117{
118 struct net_device *dev;
119
120 if (ifindex) {
121
122 rcu_read_lock();
123
124 dev = dev_get_by_index_rcu(net, ifindex);
125 if (dev && netif_is_l3_master(dev) &&
126 dev->l3mdev_ops->l3mdev_get_saddr) {
127 dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
128 }
129
130 rcu_read_unlock();
131 }
132}
133
134static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
135 const struct flowi6 *fl6)
136{
137 if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
138 return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
139
140 return NULL;
141}
142
143static inline
144struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
145 const struct flowi6 *fl6)
146{
147 struct dst_entry *dst = NULL;
148 struct net_device *dev;
149
150 dev = dev_get_by_index(net, fl6->flowi6_oif);
151 if (dev) {
152 dst = l3mdev_get_rt6_dst(dev, fl6);
153 dev_put(dev);
154 }
155
156 return dst;
157}
158
159#else
160
161static inline int l3mdev_master_ifindex_rcu(struct net_device *dev)
162{
163 return 0;
164}
165static inline int l3mdev_master_ifindex(struct net_device *dev)
166{
167 return 0;
168}
169
170static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
171{
172 return dev ? dev->ifindex : 0;
173}
174static inline int l3mdev_fib_oif(struct net_device *dev)
175{
176 return dev ? dev->ifindex : 0;
177}
178
179static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
180{
181 return 0;
182}
183static inline u32 l3mdev_fib_table(const struct net_device *dev)
184{
185 return 0;
186}
187static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
188{
189 return 0;
190}
191
192static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
193 const struct flowi4 *fl4)
194{
195 return NULL;
196}
197
198static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
199{
200 return false;
201}
202
203static inline void l3mdev_get_saddr(struct net *net, int ifindex,
204 struct flowi4 *fl4)
205{
206}
207
208static inline
209struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
210 const struct flowi6 *fl6)
211{
212 return NULL;
213}
214static inline
215struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
216 const struct flowi6 *fl6)
217{
218 return NULL;
219}
220#endif
221
222#endif /* _NET_L3MDEV_H_ */
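The new l3mdev.h above splits the API into the ops a master device implements and the inline wrappers the stack calls (l3mdev_fib_table(), l3mdev_get_saddr(), and friends). A rough sketch of the driver side, assuming a VRF-style device that also sets whatever flag makes netif_is_l3_master() return true; the myvrf names are invented:

#include <linux/netdevice.h>
#include <net/l3mdev.h>

struct myvrf_priv {
        u32 tb_id;      /* FIB table this master device steers lookups to */
};

static u32 myvrf_fib_table(const struct net_device *dev)
{
        const struct myvrf_priv *priv = netdev_priv(dev);

        return priv->tb_id;
}

static const struct l3mdev_ops myvrf_l3mdev_ops = {
        .l3mdev_fib_table = myvrf_fib_table,
        /* the rtable/saddr/rt6_dst hooks are optional and omitted here */
};

static void myvrf_setup_l3mdev(struct net_device *dev)
{
        dev->l3mdev_ops = &myvrf_l3mdev_ops;
}

Devices enslaved to such a master then get their lookups keyed by the master's interface and table through the l3mdev_fib_oif()/l3mdev_fib_table() wrappers shown in the header.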
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index fce0e35e74d0..66350ce3e955 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -18,7 +18,7 @@ struct lwtunnel_state {
18 __u16 type; 18 __u16 type;
19 __u16 flags; 19 __u16 flags;
20 atomic_t refcnt; 20 atomic_t refcnt;
21 int (*orig_output)(struct sock *sk, struct sk_buff *skb); 21 int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
22 int (*orig_input)(struct sk_buff *); 22 int (*orig_input)(struct sk_buff *);
23 int len; 23 int len;
24 __u8 data[0]; 24 __u8 data[0];
@@ -28,7 +28,7 @@ struct lwtunnel_encap_ops {
28 int (*build_state)(struct net_device *dev, struct nlattr *encap, 28 int (*build_state)(struct net_device *dev, struct nlattr *encap,
29 unsigned int family, const void *cfg, 29 unsigned int family, const void *cfg,
30 struct lwtunnel_state **ts); 30 struct lwtunnel_state **ts);
31 int (*output)(struct sock *sk, struct sk_buff *skb); 31 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
32 int (*input)(struct sk_buff *skb); 32 int (*input)(struct sk_buff *skb);
33 int (*fill_encap)(struct sk_buff *skb, 33 int (*fill_encap)(struct sk_buff *skb,
34 struct lwtunnel_state *lwtstate); 34 struct lwtunnel_state *lwtstate);
@@ -88,7 +88,7 @@ int lwtunnel_fill_encap(struct sk_buff *skb,
88int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate); 88int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
89struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len); 89struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
90int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b); 90int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
91int lwtunnel_output(struct sock *sk, struct sk_buff *skb); 91int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
92int lwtunnel_input(struct sk_buff *skb); 92int lwtunnel_input(struct sk_buff *skb);
93 93
94#else 94#else
@@ -160,7 +160,7 @@ static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
160 return 0; 160 return 0;
161} 161}
162 162
163static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb) 163static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
164{ 164{
165 return -EOPNOTSUPP; 165 return -EOPNOTSUPP;
166} 166}
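The lwtunnel.h hunk above threads struct net through the output hooks, so both an encapsulation's ->output() and the saved orig_output now take (net, sk, skb). A sketch of an encap implementation against the new prototype, assuming the dst carries the lwtunnel state as the lwtunnel infrastructure arranges; myencap is an invented name and the header-building step is elided:

#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/lwtunnel.h>

static int myencap_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct lwtunnel_state *lwtstate = skb_dst(skb)->lwtstate;

        /* ... push the encapsulation header onto skb here ... */

        /* then hand the packet back to the original dst output method */
        return lwtstate->orig_output(net, sk, skb);
}

static const struct lwtunnel_encap_ops myencap_ops = {
        .output = myencap_output,
        /* .build_state, .input, .fill_encap, etc. omitted from this sketch */
};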
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bfc569498bfa..82045fca388b 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,6 +5,7 @@
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright (C) 2015 Intel Deutschland GmbH
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -481,7 +482,9 @@ struct ieee80211_event {
481 * Note that with TDLS this can be the case (channel is HT, protection must 482 * Note that with TDLS this can be the case (channel is HT, protection must
482 * be used from this field) even when the BSS association isn't using HT. 483 * be used from this field) even when the BSS association isn't using HT.
483 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value 484 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
484 * implies disabled 485 * implies disabled. As with the cfg80211 callback, a change here should
486 * cause an event to be sent indicating where the current value is in
487 * relation to the newly configured threshold.
485 * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis 488 * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
486 * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The 489 * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
487 * may filter ARP queries targeted for other addresses than listed here. 490 * may filter ARP queries targeted for other addresses than listed here.
@@ -1240,11 +1243,6 @@ enum ieee80211_smps_mode {
1240 * @flags: configuration flags defined above 1243 * @flags: configuration flags defined above
1241 * 1244 *
1242 * @listen_interval: listen interval in units of beacon interval 1245 * @listen_interval: listen interval in units of beacon interval
1243 * @max_sleep_period: the maximum number of beacon intervals to sleep for
1244 * before checking the beacon for a TIM bit (managed mode only); this
1245 * value will be only achievable between DTIM frames, the hardware
1246 * needs to check for the multicast traffic bit in DTIM beacons.
1247 * This variable is valid only when the CONF_PS flag is set.
1248 * @ps_dtim_period: The DTIM period of the AP we're connected to, for use 1246 * @ps_dtim_period: The DTIM period of the AP we're connected to, for use
1249 * in power saving. Power saving will not be enabled until a beacon 1247 * in power saving. Power saving will not be enabled until a beacon
1250 * has been received and the DTIM period is known. 1248 * has been received and the DTIM period is known.
@@ -1274,7 +1272,6 @@ enum ieee80211_smps_mode {
1274struct ieee80211_conf { 1272struct ieee80211_conf {
1275 u32 flags; 1273 u32 flags;
1276 int power_level, dynamic_ps_timeout; 1274 int power_level, dynamic_ps_timeout;
1277 int max_sleep_period;
1278 1275
1279 u16 listen_interval; 1276 u16 listen_interval;
1280 u8 ps_dtim_period; 1277 u8 ps_dtim_period;
@@ -1360,6 +1357,8 @@ enum ieee80211_vif_flags {
1360 * @debugfs_dir: debugfs dentry, can be used by drivers to create own per 1357 * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
1361 * interface debug files. Note that it will be NULL for the virtual 1358 * interface debug files. Note that it will be NULL for the virtual
1362 * monitor interface (if that is requested.) 1359 * monitor interface (if that is requested.)
1360 * @probe_req_reg: probe requests should be reported to mac80211 for this
1361 * interface.
1363 * @drv_priv: data area for driver use, will always be aligned to 1362 * @drv_priv: data area for driver use, will always be aligned to
1364 * sizeof(void *). 1363 * sizeof(void *).
1365 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) 1364 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
@@ -1384,6 +1383,8 @@ struct ieee80211_vif {
1384 struct dentry *debugfs_dir; 1383 struct dentry *debugfs_dir;
1385#endif 1384#endif
1386 1385
1386 unsigned int probe_req_reg;
1387
1387 /* must be last */ 1388 /* must be last */
1388 u8 drv_priv[0] __aligned(sizeof(void *)); 1389 u8 drv_priv[0] __aligned(sizeof(void *));
1389}; 1390};
@@ -1494,10 +1495,8 @@ enum ieee80211_key_flags {
1494 * - Temporal Authenticator Rx MIC Key (64 bits) 1495 * - Temporal Authenticator Rx MIC Key (64 bits)
1495 * @icv_len: The ICV length for this key type 1496 * @icv_len: The ICV length for this key type
1496 * @iv_len: The IV length for this key type 1497 * @iv_len: The IV length for this key type
1497 * @drv_priv: pointer for driver use
1498 */ 1498 */
1499struct ieee80211_key_conf { 1499struct ieee80211_key_conf {
1500 void *drv_priv;
1501 atomic64_t tx_pn; 1500 atomic64_t tx_pn;
1502 u32 cipher; 1501 u32 cipher;
1503 u8 icv_len; 1502 u8 icv_len;
@@ -1680,6 +1679,7 @@ struct ieee80211_sta_rates {
1680 * @tdls: indicates whether the STA is a TDLS peer 1679 * @tdls: indicates whether the STA is a TDLS peer
1681 * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only 1680 * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
1682 * valid if the STA is a TDLS peer in the first place. 1681 * valid if the STA is a TDLS peer in the first place.
1682 * @mfp: indicates whether the STA uses management frame protection or not.
1683 * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) 1683 * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
1684 */ 1684 */
1685struct ieee80211_sta { 1685struct ieee80211_sta {
@@ -1697,6 +1697,7 @@ struct ieee80211_sta {
1697 struct ieee80211_sta_rates __rcu *rates; 1697 struct ieee80211_sta_rates __rcu *rates;
1698 bool tdls; 1698 bool tdls;
1699 bool tdls_initiator; 1699 bool tdls_initiator;
1700 bool mfp;
1700 1701
1701 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; 1702 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
1702 1703
@@ -1894,6 +1895,12 @@ struct ieee80211_txq {
1894 * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth 1895 * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
1895 * than then BSS bandwidth for a TDLS link on the base channel. 1896 * than then BSS bandwidth for a TDLS link on the base channel.
1896 * 1897 *
1898 * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs
1899 * within A-MPDU.
1900 *
1901 * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status
1902 * for sent beacons.
1903 *
1897 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays 1904 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
1898 */ 1905 */
1899enum ieee80211_hw_flags { 1906enum ieee80211_hw_flags {
@@ -1927,6 +1934,8 @@ enum ieee80211_hw_flags {
1927 IEEE80211_HW_SUPPORTS_CLONED_SKBS, 1934 IEEE80211_HW_SUPPORTS_CLONED_SKBS,
1928 IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, 1935 IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
1929 IEEE80211_HW_TDLS_WIDER_BW, 1936 IEEE80211_HW_TDLS_WIDER_BW,
1937 IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
1938 IEEE80211_HW_BEACON_TX_STATUS,
1930 1939
1931 /* keep last, obviously */ 1940 /* keep last, obviously */
1932 NUM_IEEE80211_HW_FLAGS 1941 NUM_IEEE80211_HW_FLAGS
@@ -2827,6 +2836,13 @@ enum ieee80211_reconfig_type {
2827 * See the section "Frame filtering" for more information. 2836 * See the section "Frame filtering" for more information.
2828 * This callback must be implemented and can sleep. 2837 * This callback must be implemented and can sleep.
2829 * 2838 *
2839 * @config_iface_filter: Configure the interface's RX filter.
2840 * This callback is optional and is used to configure which frames
2841 * should be passed to mac80211. The filter_flags is the combination
2842 * of FIF_* flags. The changed_flags is a bit mask that indicates
2843 * which flags are changed.
2844 * This callback can sleep.
2845 *
2830 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit 2846 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
2831 * must be set or cleared for a given STA. Must be atomic. 2847 * must be set or cleared for a given STA. Must be atomic.
2832 * 2848 *
@@ -3016,6 +3032,9 @@ enum ieee80211_reconfig_type {
3016 * buffer size of 8. Correct ways to retransmit #1 would be: 3032 * buffer size of 8. Correct ways to retransmit #1 would be:
3017 * - TX: 1 or 18 or 81 3033 * - TX: 1 or 18 or 81
3018 * Even "189" would be wrong since 1 could be lost again. 3034 * Even "189" would be wrong since 1 could be lost again.
3035 * The @amsdu parameter is valid when the action is set to
3036 * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
3037 * to receive A-MSDU within A-MPDU.
3019 * 3038 *
3020 * Returns a negative error code on failure. 3039 * Returns a negative error code on failure.
3021 * The callback can sleep. 3040 * The callback can sleep.
@@ -3153,18 +3172,24 @@ enum ieee80211_reconfig_type {
3153 * The callback is optional and can sleep. 3172 * The callback is optional and can sleep.
3154 * 3173 *
3155 * @add_chanctx: Notifies device driver about new channel context creation. 3174 * @add_chanctx: Notifies device driver about new channel context creation.
3175 * This callback may sleep.
3156 * @remove_chanctx: Notifies device driver about channel context destruction. 3176 * @remove_chanctx: Notifies device driver about channel context destruction.
3177 * This callback may sleep.
3157 * @change_chanctx: Notifies device driver about channel context changes that 3178 * @change_chanctx: Notifies device driver about channel context changes that
3158 * may happen when combining different virtual interfaces on the same 3179 * may happen when combining different virtual interfaces on the same
3159 * channel context with different settings 3180 * channel context with different settings
3181 * This callback may sleep.
3160 * @assign_vif_chanctx: Notifies device driver about channel context being bound 3182 * @assign_vif_chanctx: Notifies device driver about channel context being bound
3161 * to vif. Possible use is for hw queue remapping. 3183 * to vif. Possible use is for hw queue remapping.
3184 * This callback may sleep.
3162 * @unassign_vif_chanctx: Notifies device driver about channel context being 3185 * @unassign_vif_chanctx: Notifies device driver about channel context being
3163 * unbound from vif. 3186 * unbound from vif.
3187 * This callback may sleep.
3164 * @switch_vif_chanctx: switch a number of vifs from one chanctx to 3188 * @switch_vif_chanctx: switch a number of vifs from one chanctx to
3165 * another, as specified in the list of 3189 * another, as specified in the list of
3166 * @ieee80211_vif_chanctx_switch passed to the driver, according 3190 * @ieee80211_vif_chanctx_switch passed to the driver, according
3167 * to the mode defined in &ieee80211_chanctx_switch_mode. 3191 * to the mode defined in &ieee80211_chanctx_switch_mode.
3192 * This callback may sleep.
3168 * 3193 *
3169 * @start_ap: Start operation on the AP interface, this is called after all the 3194 * @start_ap: Start operation on the AP interface, this is called after all the
3170 * information in bss_conf is set and beacon can be retrieved. A channel 3195 * information in bss_conf is set and beacon can be retrieved. A channel
@@ -3266,6 +3291,10 @@ struct ieee80211_ops {
3266 unsigned int changed_flags, 3291 unsigned int changed_flags,
3267 unsigned int *total_flags, 3292 unsigned int *total_flags,
3268 u64 multicast); 3293 u64 multicast);
3294 void (*config_iface_filter)(struct ieee80211_hw *hw,
3295 struct ieee80211_vif *vif,
3296 unsigned int filter_flags,
3297 unsigned int changed_flags);
3269 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 3298 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
3270 bool set); 3299 bool set);
3271 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3300 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3349,7 +3378,7 @@ struct ieee80211_ops {
3349 struct ieee80211_vif *vif, 3378 struct ieee80211_vif *vif,
3350 enum ieee80211_ampdu_mlme_action action, 3379 enum ieee80211_ampdu_mlme_action action,
3351 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 3380 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
3352 u8 buf_size); 3381 u8 buf_size, bool amsdu);
3353 int (*get_survey)(struct ieee80211_hw *hw, int idx, 3382 int (*get_survey)(struct ieee80211_hw *hw, int idx,
3354 struct survey_info *survey); 3383 struct survey_info *survey);
3355 void (*rfkill_poll)(struct ieee80211_hw *hw); 3384 void (*rfkill_poll)(struct ieee80211_hw *hw);
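The mac80211.h changes above add an optional per-interface RX filter callback (paired with the new vif->probe_req_reg counter) and grow ampdu_action by an amsdu flag that is only meaningful for IEEE80211_AMPDU_TX_OPERATIONAL. A sketch of what the two driver callbacks look like against the new prototypes; the mydrv names and the per-vif private struct are invented:

#include <linux/errno.h>
#include <net/mac80211.h>

struct mydrv_vif {
        bool report_probe_req;  /* mirrors FIF_PROBE_REQ for this interface */
};

static void mydrv_config_iface_filter(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif,
                                      unsigned int filter_flags,
                                      unsigned int changed_flags)
{
        struct mydrv_vif *mvif = (void *)vif->drv_priv;

        if (changed_flags & FIF_PROBE_REQ)
                mvif->report_probe_req = !!(filter_flags & FIF_PROBE_REQ);
}

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
                              enum ieee80211_ampdu_mlme_action action,
                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
                              u8 buf_size, bool amsdu)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                /* amsdu tells us whether the peer can take A-MSDUs inside
                 * A-MPDUs; relevant when IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU
                 * is advertised.  Nothing to program in this sketch.
                 */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}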
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index b7f99615224b..da574bbdc333 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -23,14 +23,6 @@
23 23
24#include <net/cfg802154.h> 24#include <net/cfg802154.h>
25 25
26/* General MAC frame format:
27 * 2 bytes: Frame Control
28 * 1 byte: Sequence Number
29 * 20 bytes: Addressing fields
30 * 14 bytes: Auxiliary Security Header
31 */
32#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
33
34/** 26/**
35 * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags 27 * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
36 * 28 *
@@ -250,6 +242,21 @@ struct ieee802154_ops {
250}; 242};
251 243
252/** 244/**
245 * ieee802154_get_fc_from_skb - get the frame control field from an skb
246 * @skb: skb where the frame control field will be get from
247 */
248static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
249{
250 /* return some invalid fc on failure */
251 if (unlikely(skb->len < 2)) {
252 WARN_ON(1);
253 return cpu_to_le16(0);
254 }
255
256 return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb));
257}
258
259/**
253 * ieee802154_be64_to_le64 - copies and convert be64 to le64 260 * ieee802154_be64_to_le64 - copies and convert be64 to le64
254 * @le64_dst: le64 destination pointer 261 * @le64_dst: le64 destination pointer
255 * @be64_src: be64 source pointer 262 * @be64_src: be64 source pointer
@@ -270,6 +277,16 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
270} 277}
271 278
272/** 279/**
280 * ieee802154_le16_to_be16 - copies and convert le16 to be16
281 * @be16_dst: be16 destination pointer
282 * @le16_src: le16 source pointer
283 */
284static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
285{
286 __put_unaligned_memmove16(swab16p(le16_src), be16_dst);
287}
288
289/**
273 * ieee802154_alloc_hw - Allocate a new hardware device 290 * ieee802154_alloc_hw - Allocate a new hardware device
274 * 291 *
275 * This must be called once for each hardware device. The returned pointer 292 * This must be called once for each hardware device. The returned pointer
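mac802154.h above drops the hard-coded frame header length and gains small helpers, including ieee802154_get_fc_from_skb() for reading the frame control field once the MAC header offset is set on the skb. A usage sketch, assuming the IEEE802154_FC_ACK_REQ bit definition from linux/ieee802154.h; the helper name is invented:

#include <linux/ieee802154.h>
#include <net/mac802154.h>

static bool frame_wants_ack(const struct sk_buff *skb)
{
        __le16 fc = ieee802154_get_fc_from_skb(skb);

        return !!(fc & cpu_to_le16(IEEE802154_FC_ACK_REQ));
}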
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 4757997f76ed..179253f9dcfd 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -18,7 +18,7 @@
18 18
19struct mpls_iptunnel_encap { 19struct mpls_iptunnel_encap {
20 u32 label[MAX_NEW_LABELS]; 20 u32 label[MAX_NEW_LABELS];
21 u32 labels; 21 u8 labels;
22}; 22};
23 23
24static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) 24static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index aba5695fadb0..bf3937431030 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -180,15 +180,13 @@ void ndisc_cleanup(void);
180 180
181int ndisc_rcv(struct sk_buff *skb); 181int ndisc_rcv(struct sk_buff *skb);
182 182
183void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 183void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
184 const struct in6_addr *solicit,
185 const struct in6_addr *daddr, const struct in6_addr *saddr, 184 const struct in6_addr *daddr, const struct in6_addr *saddr,
186 struct sk_buff *oskb); 185 struct sk_buff *oskb);
187 186
188void ndisc_send_rs(struct net_device *dev, 187void ndisc_send_rs(struct net_device *dev,
189 const struct in6_addr *saddr, const struct in6_addr *daddr); 188 const struct in6_addr *saddr, const struct in6_addr *daddr);
190void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, 189void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
191 const struct in6_addr *daddr,
192 const struct in6_addr *solicited_addr, 190 const struct in6_addr *solicited_addr,
193 bool router, bool solicited, bool override, bool inc_opt); 191 bool router, bool solicited, bool override, bool inc_opt);
194 192
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index d4c6b5f30acd..e8d1448425a7 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -31,7 +31,7 @@ static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
31 skb->network_header -= len; 31 skb->network_header -= len;
32} 32}
33 33
34int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb); 34int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb);
35 35
36static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) 36static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
37{ 37{
@@ -45,12 +45,12 @@ struct net_device *setup_pre_routing(struct sk_buff *skb);
45void br_netfilter_enable(void); 45void br_netfilter_enable(void);
46 46
47#if IS_ENABLED(CONFIG_IPV6) 47#if IS_ENABLED(CONFIG_IPV6)
48int br_validate_ipv6(struct sk_buff *skb); 48int br_validate_ipv6(struct net *net, struct sk_buff *skb);
49unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, 49unsigned int br_nf_pre_routing_ipv6(void *priv,
50 struct sk_buff *skb, 50 struct sk_buff *skb,
51 const struct nf_hook_state *state); 51 const struct nf_hook_state *state);
52#else 52#else
53static inline int br_validate_ipv6(struct sk_buff *skb) 53static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
54{ 54{
55 return -1; 55 return -1;
56} 56}
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
index 42008f10dfc4..0a14733e8b82 100644
--- a/include/net/netfilter/ipv4/nf_dup_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -1,7 +1,7 @@
1#ifndef _NF_DUP_IPV4_H_ 1#ifndef _NF_DUP_IPV4_H_
2#define _NF_DUP_IPV4_H_ 2#define _NF_DUP_IPV4_H_
3 3
4void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum, 4void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
5 const struct in_addr *gw, int oif); 5 const struct in_addr *gw, int oif);
6 6
7#endif /* _NF_DUP_IPV4_H_ */ 7#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 77862c3645f0..df7ecd806aba 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -6,7 +6,7 @@
6#include <net/icmp.h> 6#include <net/icmp.h>
7 7
8void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); 8void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
9void nf_send_reset(struct sk_buff *oldskb, int hook); 9void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
10 10
11const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, 11const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
12 struct tcphdr *_oth, int hook); 12 struct tcphdr *_oth, int hook);
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 27666d8a0bd0..fb7da5bb76cc 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,7 @@ void nf_defrag_ipv6_enable(void);
5 5
6int nf_ct_frag6_init(void); 6int nf_ct_frag6_init(void);
7void nf_ct_frag6_cleanup(void); 7void nf_ct_frag6_cleanup(void);
8struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); 8struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
9void nf_ct_frag6_consume_orig(struct sk_buff *skb); 9void nf_ct_frag6_consume_orig(struct sk_buff *skb);
10 10
11struct inet_frags_ctl; 11struct inet_frags_ctl;
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
index ed6bd66fa5a0..fa6237b382a3 100644
--- a/include/net/netfilter/ipv6/nf_dup_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -1,7 +1,7 @@
1#ifndef _NF_DUP_IPV6_H_ 1#ifndef _NF_DUP_IPV6_H_
2#define _NF_DUP_IPV6_H_ 2#define _NF_DUP_IPV6_H_
3 3
4void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum, 4void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
5 const struct in6_addr *gw, int oif); 5 const struct in6_addr *gw, int oif);
6 6
7#endif /* _NF_DUP_IPV6_H_ */ 7#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e8ad46834df8..fde4068eec0b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -183,15 +183,12 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
183 183
184void nf_ct_free_hashtable(void *hash, unsigned int size); 184void nf_ct_free_hashtable(void *hash, unsigned int size);
185 185
186struct nf_conntrack_tuple_hash *
187__nf_conntrack_find(struct net *net, u16 zone,
188 const struct nf_conntrack_tuple *tuple);
189
190int nf_conntrack_hash_check_insert(struct nf_conn *ct); 186int nf_conntrack_hash_check_insert(struct nf_conn *ct);
191bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); 187bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
192 188
193bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, 189bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
194 u_int16_t l3num, struct nf_conntrack_tuple *tuple); 190 u_int16_t l3num, struct net *net,
191 struct nf_conntrack_tuple *tuple);
195bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, 192bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
196 const struct nf_conntrack_tuple *orig); 193 const struct nf_conntrack_tuple *orig);
197 194
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index c03f9c42b3cd..788ef58a66b9 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -41,6 +41,7 @@ void nf_conntrack_cleanup_end(void);
41 41
42bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, 42bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
43 unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, 43 unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
44 struct net *net,
44 struct nf_conntrack_tuple *tuple, 45 struct nf_conntrack_tuple *tuple,
45 const struct nf_conntrack_l3proto *l3proto, 46 const struct nf_conntrack_l3proto *l3proto,
46 const struct nf_conntrack_l4proto *l4proto); 47 const struct nf_conntrack_l4proto *l4proto);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1f7061313d54..956d8a6ac069 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -26,7 +26,7 @@ struct nf_conntrack_l4proto {
26 /* Try to fill in the third arg: dataoff is offset past network protocol 26 /* Try to fill in the third arg: dataoff is offset past network protocol
27 hdr. Return true if possible. */ 27 hdr. Return true if possible. */
28 bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, 28 bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
29 struct nf_conntrack_tuple *tuple); 29 struct net *net, struct nf_conntrack_tuple *tuple);
30 30
31 /* Invert the per-proto part of the tuple: ie. turn xmit into reply. 31 /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
32 * Some packets can't be inverted: return 0 in that case. 32 * Some packets can't be inverted: return 0 in that case.
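The conntrack hunks above thread struct net into tuple extraction (nf_ct_get_tuple(), nf_ct_get_tuplepr() and the per-protocol pkt_to_tuple hook), so protocol trackers can reach per-namespace state while parsing. A sketch of a pkt_to_tuple implementation against the new prototype, loosely modelled on the UDP tracker; the myproto name is invented and the net argument is simply unused here:

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

static bool myproto_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
                                 struct net *net, struct nf_conntrack_tuple *tuple)
{
        const struct udphdr *hp;
        struct udphdr _hdr;

        hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
        if (!hp)
                return false;

        tuple->src.u.udp.port = hp->source;
        tuple->dst.u.udp.port = hp->dest;
        return true;
}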
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 62308713dd7f..f72be38860a7 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -20,10 +20,20 @@ struct ctnl_timeout {
20}; 20};
21 21
22struct nf_conn_timeout { 22struct nf_conn_timeout {
23 struct ctnl_timeout *timeout; 23 struct ctnl_timeout __rcu *timeout;
24}; 24};
25 25
26#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data) 26static inline unsigned int *
27nf_ct_timeout_data(struct nf_conn_timeout *t)
28{
29 struct ctnl_timeout *timeout;
30
31 timeout = rcu_dereference(t->timeout);
32 if (timeout == NULL)
33 return NULL;
34
35 return (unsigned int *)timeout->data;
36}
27 37
28static inline 38static inline
29struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) 39struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
@@ -47,7 +57,7 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
47 if (timeout_ext == NULL) 57 if (timeout_ext == NULL)
48 return NULL; 58 return NULL;
49 59
50 timeout_ext->timeout = timeout; 60 rcu_assign_pointer(timeout_ext->timeout, timeout);
51 61
52 return timeout_ext; 62 return timeout_ext;
53#else 63#else
@@ -64,10 +74,13 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
64 unsigned int *timeouts; 74 unsigned int *timeouts;
65 75
66 timeout_ext = nf_ct_timeout_find(ct); 76 timeout_ext = nf_ct_timeout_find(ct);
67 if (timeout_ext) 77 if (timeout_ext) {
68 timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); 78 timeouts = nf_ct_timeout_data(timeout_ext);
69 else 79 if (unlikely(!timeouts))
80 timeouts = l4proto->get_timeouts(net);
81 } else {
70 timeouts = l4proto->get_timeouts(net); 82 timeouts = l4proto->get_timeouts(net);
83 }
71 84
72 return timeouts; 85 return timeouts;
73#else 86#else
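nf_conntrack_timeout.h above turns the attached timeout policy into an RCU-managed pointer and replaces the NF_CT_TIMEOUT_EXT_DATA macro with nf_ct_timeout_data(), which can now observe a NULL policy. A small sketch of a reader, assuming CONFIG_NF_CONNTRACK_TIMEOUT and an rcu_read_lock() section as on the packet path; the helper name is invented:

#include <net/netfilter/nf_conntrack_timeout.h>

static unsigned int *ct_policy_timeouts(const struct nf_conn *ct)
{
        struct nf_conn_timeout *ext = nf_ct_timeout_find(ct);

        /* NULL either when no extension is attached or when the policy
         * has already been unpublished under RCU */
        return ext ? nf_ct_timeout_data(ext) : NULL;
}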
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index fbfd1ba4254e..186c54138f35 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -10,7 +10,7 @@
10unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, 10unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
11 unsigned int hooknum, struct sk_buff *skb); 11 unsigned int hooknum, struct sk_buff *skb);
12 12
13int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family); 13int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family);
14 14
15static inline int nf_nat_initialized(struct nf_conn *ct, 15static inline int nf_nat_initialized(struct nf_conn *ct,
16 enum nf_nat_manip_type manip) 16 enum nf_nat_manip_type manip)
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index a3127325f624..aef3e5fc9fd9 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -43,31 +43,31 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
43 enum ip_conntrack_info ctinfo, 43 enum ip_conntrack_info ctinfo,
44 unsigned int hooknum); 44 unsigned int hooknum);
45 45
46unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 46unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
47 const struct nf_hook_state *state, 47 const struct nf_hook_state *state,
48 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 48 unsigned int (*do_chain)(void *priv,
49 struct sk_buff *skb, 49 struct sk_buff *skb,
50 const struct nf_hook_state *state, 50 const struct nf_hook_state *state,
51 struct nf_conn *ct)); 51 struct nf_conn *ct));
52 52
53unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 53unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
54 const struct nf_hook_state *state, 54 const struct nf_hook_state *state,
55 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 55 unsigned int (*do_chain)(void *priv,
56 struct sk_buff *skb, 56 struct sk_buff *skb,
57 const struct nf_hook_state *state, 57 const struct nf_hook_state *state,
58 struct nf_conn *ct)); 58 struct nf_conn *ct));
59 59
60unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, 60unsigned int nf_nat_ipv4_local_fn(void *priv,
61 struct sk_buff *skb, 61 struct sk_buff *skb,
62 const struct nf_hook_state *state, 62 const struct nf_hook_state *state,
63 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 63 unsigned int (*do_chain)(void *priv,
64 struct sk_buff *skb, 64 struct sk_buff *skb,
65 const struct nf_hook_state *state, 65 const struct nf_hook_state *state,
66 struct nf_conn *ct)); 66 struct nf_conn *ct));
67 67
68unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 68unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
69 const struct nf_hook_state *state, 69 const struct nf_hook_state *state,
70 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 70 unsigned int (*do_chain)(void *priv,
71 struct sk_buff *skb, 71 struct sk_buff *skb,
72 const struct nf_hook_state *state, 72 const struct nf_hook_state *state,
73 struct nf_conn *ct)); 73 struct nf_conn *ct));
@@ -76,31 +76,31 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
76 enum ip_conntrack_info ctinfo, 76 enum ip_conntrack_info ctinfo,
77 unsigned int hooknum, unsigned int hdrlen); 77 unsigned int hooknum, unsigned int hdrlen);
78 78
79unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 79unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
80 const struct nf_hook_state *state, 80 const struct nf_hook_state *state,
81 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 81 unsigned int (*do_chain)(void *priv,
82 struct sk_buff *skb, 82 struct sk_buff *skb,
83 const struct nf_hook_state *state, 83 const struct nf_hook_state *state,
84 struct nf_conn *ct)); 84 struct nf_conn *ct));
85 85
86unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 86unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
87 const struct nf_hook_state *state, 87 const struct nf_hook_state *state,
88 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 88 unsigned int (*do_chain)(void *priv,
89 struct sk_buff *skb, 89 struct sk_buff *skb,
90 const struct nf_hook_state *state, 90 const struct nf_hook_state *state,
91 struct nf_conn *ct)); 91 struct nf_conn *ct));
92 92
93unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, 93unsigned int nf_nat_ipv6_local_fn(void *priv,
94 struct sk_buff *skb, 94 struct sk_buff *skb,
95 const struct nf_hook_state *state, 95 const struct nf_hook_state *state,
96 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 96 unsigned int (*do_chain)(void *priv,
97 struct sk_buff *skb, 97 struct sk_buff *skb,
98 const struct nf_hook_state *state, 98 const struct nf_hook_state *state,
99 struct nf_conn *ct)); 99 struct nf_conn *ct));
100 100
101unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 101unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
102 const struct nf_hook_state *state, 102 const struct nf_hook_state *state,
103 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 103 unsigned int (*do_chain)(void *priv,
104 struct sk_buff *skb, 104 struct sk_buff *skb,
105 const struct nf_hook_state *state, 105 const struct nf_hook_state *state,
106 struct nf_conn *ct)); 106 struct nf_conn *ct));
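nf_nat_l3proto.h above switches the shared NAT wrappers (and their do_chain callbacks) from the old nf_hook_ops argument to the opaque priv pointer that netfilter hooks now receive. A sketch of how a NAT table plugs into the IPv4 wrapper after this change; the mynat names are invented and the rule traversal is elided:

#include <linux/netfilter.h>
#include <net/netfilter/nf_nat_l3proto.h>

static unsigned int mynat_do_chain(void *priv, struct sk_buff *skb,
                                   const struct nf_hook_state *state,
                                   struct nf_conn *ct)
{
        /* priv identifies the table/chain whose NAT rules would run here */
        return NF_ACCEPT;
}

static unsigned int mynat_ipv4_fn(void *priv, struct sk_buff *skb,
                                  const struct nf_hook_state *state)
{
        return nf_nat_ipv4_fn(priv, skb, state, mynat_do_chain);
}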
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index e8635854a55b..9c5638ad872e 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -32,7 +32,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
32void nf_unregister_queue_handler(void); 32void nf_unregister_queue_handler(void);
33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); 33void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
34 34
35bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); 35void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
36void nf_queue_entry_release_refs(struct nf_queue_entry *entry); 36void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
37 37
38static inline void init_hashrandom(u32 *jhash_initval) 38static inline void init_hashrandom(u32 *jhash_initval)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index aa8bee72c9d3..4bd7508bedc9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -14,9 +14,11 @@
14 14
15struct nft_pktinfo { 15struct nft_pktinfo {
16 struct sk_buff *skb; 16 struct sk_buff *skb;
17 struct net *net;
17 const struct net_device *in; 18 const struct net_device *in;
18 const struct net_device *out; 19 const struct net_device *out;
19 const struct nf_hook_ops *ops; 20 u8 pf;
21 u8 hook;
20 u8 nhoff; 22 u8 nhoff;
21 u8 thoff; 23 u8 thoff;
22 u8 tprot; 24 u8 tprot;
@@ -25,16 +27,15 @@ struct nft_pktinfo {
25}; 27};
26 28
27static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, 29static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
28 const struct nf_hook_ops *ops,
29 struct sk_buff *skb, 30 struct sk_buff *skb,
30 const struct nf_hook_state *state) 31 const struct nf_hook_state *state)
31{ 32{
32 pkt->skb = skb; 33 pkt->skb = skb;
34 pkt->net = pkt->xt.net = state->net;
33 pkt->in = pkt->xt.in = state->in; 35 pkt->in = pkt->xt.in = state->in;
34 pkt->out = pkt->xt.out = state->out; 36 pkt->out = pkt->xt.out = state->out;
35 pkt->ops = ops; 37 pkt->hook = pkt->xt.hooknum = state->hook;
36 pkt->xt.hooknum = ops->hooknum; 38 pkt->pf = pkt->xt.family = state->pf;
37 pkt->xt.family = ops->pf;
38} 39}
39 40
40/** 41/**
@@ -617,6 +618,8 @@ struct nft_expr_ops {
617 void (*eval)(const struct nft_expr *expr, 618 void (*eval)(const struct nft_expr *expr,
618 struct nft_regs *regs, 619 struct nft_regs *regs,
619 const struct nft_pktinfo *pkt); 620 const struct nft_pktinfo *pkt);
621 int (*clone)(struct nft_expr *dst,
622 const struct nft_expr *src);
620 unsigned int size; 623 unsigned int size;
621 624
622 int (*init)(const struct nft_ctx *ctx, 625 int (*init)(const struct nft_ctx *ctx,
@@ -659,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
659int nft_expr_dump(struct sk_buff *skb, unsigned int attr, 662int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
660 const struct nft_expr *expr); 663 const struct nft_expr *expr);
661 664
662static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) 665static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
663{ 666{
667 int err;
668
664 __module_get(src->ops->type->owner); 669 __module_get(src->ops->type->owner);
665 memcpy(dst, src, src->ops->size); 670 if (src->ops->clone) {
671 dst->ops = src->ops;
672 err = src->ops->clone(dst, src);
673 if (err < 0)
674 return err;
675 } else {
676 memcpy(dst, src, src->ops->size);
677 }
678 return 0;
666} 679}
667 680
668/** 681/**
@@ -815,8 +828,7 @@ int nft_register_basechain(struct nft_base_chain *basechain,
815void nft_unregister_basechain(struct nft_base_chain *basechain, 828void nft_unregister_basechain(struct nft_base_chain *basechain,
816 unsigned int hook_nops); 829 unsigned int hook_nops);
817 830
818unsigned int nft_do_chain(struct nft_pktinfo *pkt, 831unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
819 const struct nf_hook_ops *ops);
820 832
821/** 833/**
822 * struct nft_table - nf_tables table 834 * struct nft_table - nf_tables table
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 2df7f96902ee..ca6ef6bf775e 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -6,13 +6,12 @@
6 6
7static inline void 7static inline void
8nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, 8nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
9 const struct nf_hook_ops *ops,
10 struct sk_buff *skb, 9 struct sk_buff *skb,
11 const struct nf_hook_state *state) 10 const struct nf_hook_state *state)
12{ 11{
13 struct iphdr *ip; 12 struct iphdr *ip;
14 13
15 nft_set_pktinfo(pkt, ops, skb, state); 14 nft_set_pktinfo(pkt, skb, state);
16 15
17 ip = ip_hdr(pkt->skb); 16 ip = ip_hdr(pkt->skb);
18 pkt->tprot = ip->protocol; 17 pkt->tprot = ip->protocol;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 97db2e3a5e65..8ad39a6a5fe1 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -6,14 +6,13 @@
6 6
7static inline int 7static inline int
8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, 8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
9 const struct nf_hook_ops *ops,
10 struct sk_buff *skb, 9 struct sk_buff *skb,
11 const struct nf_hook_state *state) 10 const struct nf_hook_state *state)
12{ 11{
13 int protohdr, thoff = 0; 12 int protohdr, thoff = 0;
14 unsigned short frag_off; 13 unsigned short frag_off;
15 14
16 nft_set_pktinfo(pkt, ops, skb, state); 15 nft_set_pktinfo(pkt, skb, state);
17 16
18 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 17 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
19 /* If malformed, drop it */ 18 /* If malformed, drop it */
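With ops removed from the pktinfo setup, a base chain hook builds the pktinfo purely from the skb and the nf_hook_state and passes its private data straight to nft_do_chain(). A sketch of the resulting hook shape, with the hook prototype assumed from the new nft_do_chain() signature and the state-based helpers above:

static unsigned int example_do_chain_ipv4(void *priv, struct sk_buff *skb,
					  const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo_ipv4(&pkt, skb, state);

	return nft_do_chain(&pkt, priv);
}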
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
deleted file mode 100644
index aff88ba91391..000000000000
--- a/include/net/netfilter/nfnetlink_queue.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef _NET_NFNL_QUEUE_H_
2#define _NET_NFNL_QUEUE_H_
3
4#include <linux/netfilter/nf_conntrack_common.h>
5
6struct nf_conn;
7
8#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
9struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
10 enum ip_conntrack_info *ctinfo);
11struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
12 const struct nlattr *attr,
13 enum ip_conntrack_info *ctinfo);
14int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
15 enum ip_conntrack_info ctinfo);
16void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
17 enum ip_conntrack_info ctinfo, int diff);
18int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
19 u32 portid, u32 report);
20#else
21inline struct nf_conn *
22nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
23{
24 return NULL;
25}
26
27inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
28 const struct nlattr *attr,
29 enum ip_conntrack_info *ctinfo)
30{
31 return NULL;
32}
33
34inline int
35nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
36{
37 return 0;
38}
39
40inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
41 enum ip_conntrack_info ctinfo, int diff)
42{
43}
44
45inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
46 u32 portid, u32 report)
47{
48 return 0;
49}
50#endif /* NF_CONNTRACK */
51#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 2a5dbcc90d1c..0e3172751755 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1004,6 +1004,15 @@ static inline __be32 nla_get_be32(const struct nlattr *nla)
1004} 1004}
1005 1005
1006/** 1006/**
1007 * nla_get_le32 - return payload of __le32 attribute
1008 * @nla: __le32 netlink attribute
1009 */
1010static inline __le32 nla_get_le32(const struct nlattr *nla)
1011{
1012 return *(__le32 *) nla_data(nla);
1013}
1014
1015/**
1007 * nla_get_u16 - return payload of u16 attribute 1016 * nla_get_u16 - return payload of u16 attribute
1008 * @nla: u16 netlink attribute 1017 * @nla: u16 netlink attribute
1009 */ 1018 */
@@ -1066,6 +1075,15 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
1066} 1075}
1067 1076
1068/** 1077/**
1078 * nla_get_le64 - return payload of __le64 attribute
1079 * @nla: __le64 netlink attribute
1080 */
1081static inline __le64 nla_get_le64(const struct nlattr *nla)
1082{
1083 return *(__le64 *) nla_data(nla);
1084}
1085
1086/**
1069 * nla_get_s32 - return payload of s32 attribute 1087 * nla_get_s32 - return payload of s32 attribute
1070 * @nla: s32 netlink attribute 1088 * @nla: s32 netlink attribute
1071 */ 1089 */
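The new little-endian getters mirror the existing nla_get_be32()/nla_get_be64() pattern, so a caller converts to host order explicitly. An illustrative reader (the helper name and attribute index are hypothetical; le32_to_cpu() is the standard byteorder helper):

static u32 example_read_le32(struct nlattr **tb, int attrtype)
{
	if (!tb[attrtype])
		return 0;

	return le32_to_cpu(nla_get_le32(tb[attrtype]));
}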
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 75d2e1880059..707e3ab816c2 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
35#define NCI_MAX_NUM_RF_CONFIGS 10 35#define NCI_MAX_NUM_RF_CONFIGS 10
36#define NCI_MAX_NUM_CONN 10 36#define NCI_MAX_NUM_CONN 10
37#define NCI_MAX_PARAM_LEN 251 37#define NCI_MAX_PARAM_LEN 251
38#define NCI_MAX_PAYLOAD_SIZE 255
38#define NCI_MAX_PACKET_SIZE 258 39#define NCI_MAX_PACKET_SIZE 258
39 40
40/* NCI Status Codes */ 41/* NCI Status Codes */
@@ -315,6 +316,8 @@ struct nci_nfcee_mode_set_cmd {
315 __u8 nfcee_mode; 316 __u8 nfcee_mode;
316} __packed; 317} __packed;
317 318
319#define NCI_OP_CORE_GET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x03)
320
318/* ----------------------- */ 321/* ----------------------- */
319/* ---- NCI Responses ---- */ 322/* ---- NCI Responses ---- */
320/* ----------------------- */ 323/* ----------------------- */
@@ -375,6 +378,9 @@ struct nci_nfcee_discover_rsp {
375} __packed; 378} __packed;
376 379
377#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01) 380#define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
381
382#define NCI_OP_CORE_GET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x03)
383
378/* --------------------------- */ 384/* --------------------------- */
379/* ---- NCI Notifications ---- */ 385/* ---- NCI Notifications ---- */
380/* --------------------------- */ 386/* --------------------------- */
@@ -528,4 +534,6 @@ struct nci_nfcee_discover_ntf {
528 struct nci_nfcee_information_tlv information_tlv; 534 struct nci_nfcee_information_tlv information_tlv;
529} __packed; 535} __packed;
530 536
537#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00)
538
531#endif /* __NCI_H */ 539#endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index d0d0f1e53bb9..57ce24fb0047 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -67,7 +67,7 @@ enum nci_state {
67 67
68struct nci_dev; 68struct nci_dev;
69 69
70struct nci_prop_ops { 70struct nci_driver_ops {
71 __u16 opcode; 71 __u16 opcode;
72 int (*rsp)(struct nci_dev *dev, struct sk_buff *skb); 72 int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
73 int (*ntf)(struct nci_dev *dev, struct sk_buff *skb); 73 int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
@@ -94,8 +94,11 @@ struct nci_ops {
94 void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd, 94 void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
95 struct sk_buff *skb); 95 struct sk_buff *skb);
96 96
97 struct nci_prop_ops *prop_ops; 97 struct nci_driver_ops *prop_ops;
98 size_t n_prop_ops; 98 size_t n_prop_ops;
99
100 struct nci_driver_ops *core_ops;
101 size_t n_core_ops;
99}; 102};
100 103
101#define NCI_MAX_SUPPORTED_RF_INTERFACES 4 104#define NCI_MAX_SUPPORTED_RF_INTERFACES 4
@@ -125,6 +128,8 @@ struct nci_conn_info {
125 128
126/* Gates */ 129/* Gates */
127#define NCI_HCI_ADMIN_GATE 0x00 130#define NCI_HCI_ADMIN_GATE 0x00
131#define NCI_HCI_LOOPBACK_GATE 0x04
132#define NCI_HCI_IDENTITY_MGMT_GATE 0x05
128#define NCI_HCI_LINK_MGMT_GATE 0x06 133#define NCI_HCI_LINK_MGMT_GATE 0x06
129 134
130/* Pipes */ 135/* Pipes */
@@ -278,10 +283,12 @@ int nci_request(struct nci_dev *ndev,
278 unsigned long opt), 283 unsigned long opt),
279 unsigned long opt, __u32 timeout); 284 unsigned long opt, __u32 timeout);
280int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload); 285int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
286int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload);
281int nci_core_reset(struct nci_dev *ndev); 287int nci_core_reset(struct nci_dev *ndev);
282int nci_core_init(struct nci_dev *ndev); 288int nci_core_init(struct nci_dev *ndev);
283 289
284int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); 290int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
291int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
285int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); 292int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
286 293
287int nci_nfcee_discover(struct nci_dev *ndev, u8 action); 294int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
@@ -305,6 +312,7 @@ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
305 const u8 *param, size_t param_len); 312 const u8 *param, size_t param_len);
306int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, 313int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
307 struct sk_buff **skb); 314 struct sk_buff **skb);
315int nci_hci_clear_all_pipes(struct nci_dev *ndev);
308int nci_hci_dev_session_init(struct nci_dev *ndev); 316int nci_hci_dev_session_init(struct nci_dev *ndev);
309 317
310static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, 318static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
@@ -348,9 +356,14 @@ int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
348 struct sk_buff *skb); 356 struct sk_buff *skb);
349int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode, 357int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
350 struct sk_buff *skb); 358 struct sk_buff *skb);
359int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
360 struct sk_buff *skb);
361int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
362 struct sk_buff *skb);
351void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); 363void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
352int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); 364int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
353int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); 365int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
366int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id);
354void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, 367void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
355 __u8 conn_id, int err); 368 __u8 conn_id, int err);
356void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err); 369void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
@@ -365,6 +378,7 @@ void nci_clear_target_list(struct nci_dev *ndev);
365void nci_req_complete(struct nci_dev *ndev, int result); 378void nci_req_complete(struct nci_dev *ndev, int result);
366struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, 379struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
367 int conn_id); 380 int conn_id);
381int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id);
368 382
369/* ----- NCI status code ----- */ 383/* ----- NCI status code ----- */
370int nci_to_errno(__u8 code); 384int nci_to_errno(__u8 code);
@@ -380,6 +394,12 @@ struct nci_spi {
380 394
381 unsigned int xfer_udelay; /* microseconds delay between 395 unsigned int xfer_udelay; /* microseconds delay between
382 transactions */ 396 transactions */
397
398 unsigned int xfer_speed_hz; /*
399 * SPI clock frequency
400 * 0 => default clock
401 */
402
383 u8 acknowledge_mode; 403 u8 acknowledge_mode;
384 404
385 struct completion req_completion; 405 struct completion req_completion;
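With nci_prop_ops renamed to nci_driver_ops and the new core_ops table, a driver can hook core opcodes the same way it already hooked proprietary ones. A hypothetical driver fragment wiring up the new CORE_RESET notification (handler and array names are invented; ARRAY_SIZE is the usual kernel macro):

static int example_core_reset_ntf(struct nci_dev *ndev, struct sk_buff *skb)
{
	/* vendor-specific recovery after an unsolicited CORE_RESET_NTF */
	return 0;
}

static struct nci_driver_ops example_core_ops[] = {
	{
		.opcode = NCI_OP_CORE_RESET_NTF,
		.ntf    = example_core_reset_ntf,
	},
};

static struct nci_ops example_nci_ops = {
	/* .open, .close, .send, ... as before */
	.core_ops   = example_core_ops,
	.n_core_ops = ARRAY_SIZE(example_core_ops),
};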
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 30afc9a6718c..dcfcfc9c00bf 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -68,7 +68,7 @@ struct nfc_ops {
68 int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target, 68 int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target,
69 u32 protocol); 69 u32 protocol);
70 void (*deactivate_target)(struct nfc_dev *dev, 70 void (*deactivate_target)(struct nfc_dev *dev,
71 struct nfc_target *target); 71 struct nfc_target *target, u8 mode);
72 int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target, 72 int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
73 struct sk_buff *skb, data_exchange_cb_t cb, 73 struct sk_buff *skb, data_exchange_cb_t cb,
74 void *cb_context); 74 void *cb_context);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index cf2713d8b975..32cb3e591e07 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -56,6 +56,22 @@ enum nl802154_commands {
56 56
57 /* add new commands above here */ 57 /* add new commands above here */
58 58
59#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
60 NL802154_CMD_SET_SEC_PARAMS,
61 NL802154_CMD_GET_SEC_KEY, /* can dump */
62 NL802154_CMD_NEW_SEC_KEY,
63 NL802154_CMD_DEL_SEC_KEY,
64 NL802154_CMD_GET_SEC_DEV, /* can dump */
65 NL802154_CMD_NEW_SEC_DEV,
66 NL802154_CMD_DEL_SEC_DEV,
67 NL802154_CMD_GET_SEC_DEVKEY, /* can dump */
68 NL802154_CMD_NEW_SEC_DEVKEY,
69 NL802154_CMD_DEL_SEC_DEVKEY,
70 NL802154_CMD_GET_SEC_LEVEL, /* can dump */
71 NL802154_CMD_NEW_SEC_LEVEL,
72 NL802154_CMD_DEL_SEC_LEVEL,
73#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
74
59 /* used to define NL802154_CMD_MAX below */ 75 /* used to define NL802154_CMD_MAX below */
60 __NL802154_CMD_AFTER_LAST, 76 __NL802154_CMD_AFTER_LAST,
61 NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 77 NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
@@ -110,6 +126,18 @@ enum nl802154_attrs {
110 126
111 /* add attributes here, update the policy in nl802154.c */ 127 /* add attributes here, update the policy in nl802154.c */
112 128
129#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
130 NL802154_ATTR_SEC_ENABLED,
131 NL802154_ATTR_SEC_OUT_LEVEL,
132 NL802154_ATTR_SEC_OUT_KEY_ID,
133 NL802154_ATTR_SEC_FRAME_COUNTER,
134
135 NL802154_ATTR_SEC_LEVEL,
136 NL802154_ATTR_SEC_DEVICE,
137 NL802154_ATTR_SEC_DEVKEY,
138 NL802154_ATTR_SEC_KEY,
139#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
140
113 __NL802154_ATTR_AFTER_LAST, 141 __NL802154_ATTR_AFTER_LAST,
114 NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 142 NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
115}; 143};
@@ -247,4 +275,167 @@ enum nl802154_supported_bool_states {
247 NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 275 NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
248}; 276};
249 277
278#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
279
280enum nl802154_dev_addr_modes {
281 NL802154_DEV_ADDR_NONE,
282 __NL802154_DEV_ADDR_INVALID,
283 NL802154_DEV_ADDR_SHORT,
284 NL802154_DEV_ADDR_EXTENDED,
285
286 /* keep last */
287 __NL802154_DEV_ADDR_AFTER_LAST,
288 NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1
289};
290
291enum nl802154_dev_addr_attrs {
292 NL802154_DEV_ADDR_ATTR_UNSPEC,
293
294 NL802154_DEV_ADDR_ATTR_PAN_ID,
295 NL802154_DEV_ADDR_ATTR_MODE,
296 NL802154_DEV_ADDR_ATTR_SHORT,
297 NL802154_DEV_ADDR_ATTR_EXTENDED,
298
299 /* keep last */
300 __NL802154_DEV_ADDR_ATTR_AFTER_LAST,
301 NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
302};
303
304enum nl802154_key_id_modes {
305 NL802154_KEY_ID_MODE_IMPLICIT,
306 NL802154_KEY_ID_MODE_INDEX,
307 NL802154_KEY_ID_MODE_INDEX_SHORT,
308 NL802154_KEY_ID_MODE_INDEX_EXTENDED,
309
310 /* keep last */
311 __NL802154_KEY_ID_MODE_AFTER_LAST,
312 NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1
313};
314
315enum nl802154_key_id_attrs {
316 NL802154_KEY_ID_ATTR_UNSPEC,
317
318 NL802154_KEY_ID_ATTR_MODE,
319 NL802154_KEY_ID_ATTR_INDEX,
320 NL802154_KEY_ID_ATTR_IMPLICIT,
321 NL802154_KEY_ID_ATTR_SOURCE_SHORT,
322 NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
323
324 /* keep last */
325 __NL802154_KEY_ID_ATTR_AFTER_LAST,
326 NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1
327};
328
329enum nl802154_seclevels {
330 NL802154_SECLEVEL_NONE,
331 NL802154_SECLEVEL_MIC32,
332 NL802154_SECLEVEL_MIC64,
333 NL802154_SECLEVEL_MIC128,
334 NL802154_SECLEVEL_ENC,
335 NL802154_SECLEVEL_ENC_MIC32,
336 NL802154_SECLEVEL_ENC_MIC64,
337 NL802154_SECLEVEL_ENC_MIC128,
338
339 /* keep last */
340 __NL802154_SECLEVEL_AFTER_LAST,
341 NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1
342};
343
344enum nl802154_frames {
345 NL802154_FRAME_BEACON,
346 NL802154_FRAME_DATA,
347 NL802154_FRAME_ACK,
348 NL802154_FRAME_CMD,
349
350 /* keep last */
351 __NL802154_FRAME_AFTER_LAST,
352 NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1
353};
354
355enum nl802154_cmd_frames {
356 __NL802154_CMD_FRAME_INVALID,
357 NL802154_CMD_FRAME_ASSOC_REQUEST,
358 NL802154_CMD_FRAME_ASSOC_RESPONSE,
359 NL802154_CMD_FRAME_DISASSOC_NOTIFY,
360 NL802154_CMD_FRAME_DATA_REQUEST,
361 NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY,
362 NL802154_CMD_FRAME_ORPHAN_NOTIFY,
363 NL802154_CMD_FRAME_BEACON_REQUEST,
364 NL802154_CMD_FRAME_COORD_REALIGNMENT,
365 NL802154_CMD_FRAME_GTS_REQUEST,
366
367 /* keep last */
368 __NL802154_CMD_FRAME_AFTER_LAST,
369 NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1
370};
371
372enum nl802154_seclevel_attrs {
373 NL802154_SECLEVEL_ATTR_UNSPEC,
374
375 NL802154_SECLEVEL_ATTR_LEVELS,
376 NL802154_SECLEVEL_ATTR_FRAME,
377 NL802154_SECLEVEL_ATTR_CMD_FRAME,
378 NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
379
380 /* keep last */
381 __NL802154_SECLEVEL_ATTR_AFTER_LAST,
382 NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1
383};
384
385/* TODO what is this? couldn't find in mib */
386enum {
387 NL802154_DEVKEY_IGNORE,
388 NL802154_DEVKEY_RESTRICT,
389 NL802154_DEVKEY_RECORD,
390
391 /* keep last */
392 __NL802154_DEVKEY_AFTER_LAST,
393 NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1
394};
395
396enum nl802154_dev {
397 NL802154_DEV_ATTR_UNSPEC,
398
399 NL802154_DEV_ATTR_FRAME_COUNTER,
400 NL802154_DEV_ATTR_PAN_ID,
401 NL802154_DEV_ATTR_SHORT_ADDR,
402 NL802154_DEV_ATTR_EXTENDED_ADDR,
403 NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
404 NL802154_DEV_ATTR_KEY_MODE,
405
406 /* keep last */
407 __NL802154_DEV_ATTR_AFTER_LAST,
408 NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1
409};
410
411enum nl802154_devkey {
412 NL802154_DEVKEY_ATTR_UNSPEC,
413
414 NL802154_DEVKEY_ATTR_FRAME_COUNTER,
415 NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
416 NL802154_DEVKEY_ATTR_ID,
417
418 /* keep last */
419 __NL802154_DEVKEY_ATTR_AFTER_LAST,
420 NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1
421};
422
423enum nl802154_key {
424 NL802154_KEY_ATTR_UNSPEC,
425
426 NL802154_KEY_ATTR_ID,
427 NL802154_KEY_ATTR_USAGE_FRAMES,
428 NL802154_KEY_ATTR_USAGE_CMDS,
429 NL802154_KEY_ATTR_BYTES,
430
431 /* keep last */
432 __NL802154_KEY_ATTR_AFTER_LAST,
433 NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1
434};
435
436#define NL802154_KEY_SIZE 16
437#define NL802154_CMD_FRAME_NR_IDS 256
438
439#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
440
250#endif /* __NL802154_H */ 441#endif /* __NL802154_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 87935cad2f7b..a0dde04eb178 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -32,17 +32,17 @@ struct request_sock_ops {
32 int obj_size; 32 int obj_size;
33 struct kmem_cache *slab; 33 struct kmem_cache *slab;
34 char *slab_name; 34 char *slab_name;
35 int (*rtx_syn_ack)(struct sock *sk, 35 int (*rtx_syn_ack)(const struct sock *sk,
36 struct request_sock *req); 36 struct request_sock *req);
37 void (*send_ack)(struct sock *sk, struct sk_buff *skb, 37 void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
38 struct request_sock *req); 38 struct request_sock *req);
39 void (*send_reset)(struct sock *sk, 39 void (*send_reset)(const struct sock *sk,
40 struct sk_buff *skb); 40 struct sk_buff *skb);
41 void (*destructor)(struct request_sock *req); 41 void (*destructor)(struct request_sock *req);
42 void (*syn_ack_timeout)(const struct request_sock *req); 42 void (*syn_ack_timeout)(const struct request_sock *req);
43}; 43};
44 44
45int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); 45int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
46 46
47/* struct request_sock - mini sock to represent a connection request 47/* struct request_sock - mini sock to represent a connection request
48 */ 48 */
@@ -50,16 +50,15 @@ struct request_sock {
50 struct sock_common __req_common; 50 struct sock_common __req_common;
51#define rsk_refcnt __req_common.skc_refcnt 51#define rsk_refcnt __req_common.skc_refcnt
52#define rsk_hash __req_common.skc_hash 52#define rsk_hash __req_common.skc_hash
53#define rsk_listener __req_common.skc_listener
54#define rsk_window_clamp __req_common.skc_window_clamp
55#define rsk_rcv_wnd __req_common.skc_rcv_wnd
53 56
54 struct request_sock *dl_next; 57 struct request_sock *dl_next;
55 struct sock *rsk_listener;
56 u16 mss; 58 u16 mss;
57 u8 num_retrans; /* number of retransmits */ 59 u8 num_retrans; /* number of retransmits */
58 u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ 60 u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
59 u8 num_timeout:7; /* number of timeouts */ 61 u8 num_timeout:7; /* number of timeouts */
60 /* The following two fields can be easily recomputed I think -AK */
61 u32 window_clamp; /* window clamp at creation time */
62 u32 rcv_wnd; /* rcv_wnd offered first time */
63 u32 ts_recent; 62 u32 ts_recent;
64 struct timer_list rsk_timer; 63 struct timer_list rsk_timer;
65 const struct request_sock_ops *rsk_ops; 64 const struct request_sock_ops *rsk_ops;
@@ -69,15 +68,35 @@ struct request_sock {
69 u32 peer_secid; 68 u32 peer_secid;
70}; 69};
71 70
71static inline struct request_sock *inet_reqsk(struct sock *sk)
72{
73 return (struct request_sock *)sk;
74}
75
76static inline struct sock *req_to_sk(struct request_sock *req)
77{
78 return (struct sock *)req;
79}
80
72static inline struct request_sock * 81static inline struct request_sock *
73reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) 82reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
83 bool attach_listener)
74{ 84{
75 struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); 85 struct request_sock *req;
86
87 req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
76 88
77 if (req) { 89 if (req) {
78 req->rsk_ops = ops; 90 req->rsk_ops = ops;
79 sock_hold(sk_listener); 91 if (attach_listener) {
80 req->rsk_listener = sk_listener; 92 sock_hold(sk_listener);
93 req->rsk_listener = sk_listener;
94 } else {
95 req->rsk_listener = NULL;
96 }
97 req_to_sk(req)->sk_prot = sk_listener->sk_prot;
98 sk_node_init(&req_to_sk(req)->sk_node);
99 sk_tx_queue_clear(req_to_sk(req));
81 req->saved_syn = NULL; 100 req->saved_syn = NULL;
82 /* Following is temporary. It is coupled with debugging 101 /* Following is temporary. It is coupled with debugging
83 * helpers in reqsk_put() & reqsk_free() 102 * helpers in reqsk_put() & reqsk_free()
@@ -87,16 +106,6 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
87 return req; 106 return req;
88} 107}
89 108
90static inline struct request_sock *inet_reqsk(struct sock *sk)
91{
92 return (struct request_sock *)sk;
93}
94
95static inline struct sock *req_to_sk(struct request_sock *req)
96{
97 return (struct sock *)req;
98}
99
100static inline void reqsk_free(struct request_sock *req) 109static inline void reqsk_free(struct request_sock *req)
101{ 110{
102 /* temporary debugging */ 111 /* temporary debugging */
@@ -117,26 +126,6 @@ static inline void reqsk_put(struct request_sock *req)
117 126
118extern int sysctl_max_syn_backlog; 127extern int sysctl_max_syn_backlog;
119 128
120/** struct listen_sock - listen state
121 *
122 * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
123 */
124struct listen_sock {
125 int qlen_inc; /* protected by listener lock */
126 int young_inc;/* protected by listener lock */
127
128 /* following fields can be updated by timer */
129 atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
130 atomic_t young_dec;
131
132 u8 max_qlen_log ____cacheline_aligned_in_smp;
133 u8 synflood_warned;
134 /* 2 bytes hole, try to use */
135 u32 hash_rnd;
136 u32 nr_table_entries;
137 struct request_sock *syn_table[0];
138};
139
140/* 129/*
141 * For a TCP Fast Open listener - 130 * For a TCP Fast Open listener -
142 * lock - protects the access to all the reqsk, which is co-owned by 131 * lock - protects the access to all the reqsk, which is co-owned by
@@ -170,127 +159,72 @@ struct fastopen_queue {
170 * @rskq_accept_head - FIFO head of established children 159 * @rskq_accept_head - FIFO head of established children
171 * @rskq_accept_tail - FIFO tail of established children 160 * @rskq_accept_tail - FIFO tail of established children
172 * @rskq_defer_accept - User waits for some data after accept() 161 * @rskq_defer_accept - User waits for some data after accept()
173 * @syn_wait_lock - serializer
174 *
175 * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
176 * lock sock while browsing the listening hash (otherwise it's deadlock prone).
177 * 162 *
178 */ 163 */
179struct request_sock_queue { 164struct request_sock_queue {
165 spinlock_t rskq_lock;
166 u8 rskq_defer_accept;
167
168 u32 synflood_warned;
169 atomic_t qlen;
170 atomic_t young;
171
180 struct request_sock *rskq_accept_head; 172 struct request_sock *rskq_accept_head;
181 struct request_sock *rskq_accept_tail; 173 struct request_sock *rskq_accept_tail;
182 u8 rskq_defer_accept; 174 struct fastopen_queue fastopenq; /* Check max_qlen != 0 to determine
183 struct listen_sock *listen_opt; 175 * if TFO is enabled.
184 struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been
185 * enabled on this listener. Check
186 * max_qlen != 0 in fastopen_queue
187 * to determine if TFO is enabled
188 * right at this moment.
189 */ 176 */
190
191 /* temporary alignment, our goal is to get rid of this lock */
192 spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
193}; 177};
194 178
195int reqsk_queue_alloc(struct request_sock_queue *queue, 179void reqsk_queue_alloc(struct request_sock_queue *queue);
196 unsigned int nr_table_entries);
197 180
198void __reqsk_queue_destroy(struct request_sock_queue *queue);
199void reqsk_queue_destroy(struct request_sock_queue *queue);
200void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, 181void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
201 bool reset); 182 bool reset);
202 183
203static inline struct request_sock * 184static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
204 reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
205{
206 struct request_sock *req = queue->rskq_accept_head;
207
208 queue->rskq_accept_head = NULL;
209 return req;
210}
211
212static inline int reqsk_queue_empty(struct request_sock_queue *queue)
213{ 185{
214 return queue->rskq_accept_head == NULL; 186 return queue->rskq_accept_head == NULL;
215} 187}
216 188
217static inline void reqsk_queue_add(struct request_sock_queue *queue, 189static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
218 struct request_sock *req, 190 struct sock *parent)
219 struct sock *parent,
220 struct sock *child)
221{ 191{
222 req->sk = child; 192 struct request_sock *req;
223 sk_acceptq_added(parent);
224
225 if (queue->rskq_accept_head == NULL)
226 queue->rskq_accept_head = req;
227 else
228 queue->rskq_accept_tail->dl_next = req;
229
230 queue->rskq_accept_tail = req;
231 req->dl_next = NULL;
232}
233
234static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
235{
236 struct request_sock *req = queue->rskq_accept_head;
237
238 WARN_ON(req == NULL);
239
240 queue->rskq_accept_head = req->dl_next;
241 if (queue->rskq_accept_head == NULL)
242 queue->rskq_accept_tail = NULL;
243 193
194 spin_lock_bh(&queue->rskq_lock);
195 req = queue->rskq_accept_head;
196 if (req) {
197 sk_acceptq_removed(parent);
198 queue->rskq_accept_head = req->dl_next;
199 if (queue->rskq_accept_head == NULL)
200 queue->rskq_accept_tail = NULL;
201 }
202 spin_unlock_bh(&queue->rskq_lock);
244 return req; 203 return req;
245} 204}
246 205
247static inline void reqsk_queue_removed(struct request_sock_queue *queue, 206static inline void reqsk_queue_removed(struct request_sock_queue *queue,
248 const struct request_sock *req) 207 const struct request_sock *req)
249{ 208{
250 struct listen_sock *lopt = queue->listen_opt;
251
252 if (req->num_timeout == 0) 209 if (req->num_timeout == 0)
253 atomic_inc(&lopt->young_dec); 210 atomic_dec(&queue->young);
254 atomic_inc(&lopt->qlen_dec); 211 atomic_dec(&queue->qlen);
255} 212}
256 213
257static inline void reqsk_queue_added(struct request_sock_queue *queue) 214static inline void reqsk_queue_added(struct request_sock_queue *queue)
258{ 215{
259 struct listen_sock *lopt = queue->listen_opt; 216 atomic_inc(&queue->young);
260 217 atomic_inc(&queue->qlen);
261 lopt->young_inc++;
262 lopt->qlen_inc++;
263}
264
265static inline int listen_sock_qlen(const struct listen_sock *lopt)
266{
267 return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
268}
269
270static inline int listen_sock_young(const struct listen_sock *lopt)
271{
272 return lopt->young_inc - atomic_read(&lopt->young_dec);
273} 218}
274 219
275static inline int reqsk_queue_len(const struct request_sock_queue *queue) 220static inline int reqsk_queue_len(const struct request_sock_queue *queue)
276{ 221{
277 const struct listen_sock *lopt = queue->listen_opt; 222 return atomic_read(&queue->qlen);
278
279 return lopt ? listen_sock_qlen(lopt) : 0;
280} 223}
281 224
282static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) 225static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
283{ 226{
284 return listen_sock_young(queue->listen_opt); 227 return atomic_read(&queue->young);
285} 228}
286 229
287static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
288{
289 return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
290}
291
292void reqsk_queue_hash_req(struct request_sock_queue *queue,
293 u32 hash, struct request_sock *req,
294 unsigned long timeout);
295
296#endif /* _REQUEST_SOCK_H */ 230#endif /* _REQUEST_SOCK_H */
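Two call sites change shape with this rework: request allocation decides whether to pin the listener, and accept-side removal now takes the parent socket so the queue accounting happens under the new rskq_lock. A sketch under those assumptions (the surrounding function names are illustrative; the real call sites live in the TCP code):

static struct request_sock *example_alloc_req(const struct request_sock_ops *ops,
					      struct sock *listener,
					      bool want_cookie)
{
	/* syncookie requests no longer take a reference on the listener */
	return reqsk_alloc(ops, listener, !want_cookie);
}

static struct request_sock *example_accept_one(struct request_sock_queue *queue,
					       struct sock *listener)
{
	/* parent is needed so sk_acceptq_removed() runs under rskq_lock */
	return reqsk_queue_remove(queue, listener);
}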
diff --git a/include/net/route.h b/include/net/route.h
index f46af256880c..ee81307863d5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -28,6 +28,8 @@
28#include <net/inetpeer.h> 28#include <net/inetpeer.h>
29#include <net/flow.h> 29#include <net/flow.h>
30#include <net/inet_sock.h> 30#include <net/inet_sock.h>
31#include <net/ip_fib.h>
32#include <net/l3mdev.h>
31#include <linux/in_route.h> 33#include <linux/in_route.h>
32#include <linux/rtnetlink.h> 34#include <linux/rtnetlink.h>
33#include <linux/rcupdate.h> 35#include <linux/rcupdate.h>
@@ -64,6 +66,8 @@ struct rtable {
64 /* Miscellaneous cached information */ 66 /* Miscellaneous cached information */
65 u32 rt_pmtu; 67 u32 rt_pmtu;
66 68
69 u32 rt_table_id;
70
67 struct list_head rt_uncached; 71 struct list_head rt_uncached;
68 struct uncached_list *rt_uncached_list; 72 struct uncached_list *rt_uncached_list;
69}; 73};
@@ -110,9 +114,17 @@ struct in_device;
110int ip_rt_init(void); 114int ip_rt_init(void);
111void rt_cache_flush(struct net *net); 115void rt_cache_flush(struct net *net);
112void rt_flush_dev(struct net_device *dev); 116void rt_flush_dev(struct net_device *dev);
113struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); 117struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp,
118 int mp_hash);
119
120static inline struct rtable *__ip_route_output_key(struct net *net,
121 struct flowi4 *flp)
122{
123 return __ip_route_output_key_hash(net, flp, -1);
124}
125
114struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, 126struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
115 struct sock *sk); 127 const struct sock *sk);
116struct dst_entry *ipv4_blackhole_route(struct net *net, 128struct dst_entry *ipv4_blackhole_route(struct net *net,
117 struct dst_entry *dst_orig); 129 struct dst_entry *dst_orig);
118 130
@@ -254,9 +266,6 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
254 if (inet_sk(sk)->transparent) 266 if (inet_sk(sk)->transparent)
255 flow_flags |= FLOWI_FLAG_ANYSRC; 267 flow_flags |= FLOWI_FLAG_ANYSRC;
256 268
257 if (netif_index_is_vrf(sock_net(sk), oif))
258 flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
259
260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, 269 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
261 protocol, flow_flags, dst, src, dport, sport); 270 protocol, flow_flags, dst, src, dport, sport);
262} 271}
@@ -273,6 +282,10 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
273 ip_route_connect_init(fl4, dst, src, tos, oif, protocol, 282 ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
274 sport, dport, sk); 283 sport, dport, sk);
275 284
285 if (!src && oif) {
286 l3mdev_get_saddr(net, oif, fl4);
287 src = fl4->saddr;
288 }
276 if (!dst || !src) { 289 if (!dst || !src) {
277 rt = __ip_route_output_key(net, fl4); 290 rt = __ip_route_output_key(net, fl4);
278 if (IS_ERR(rt)) 291 if (IS_ERR(rt))
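The legacy lookup keeps its behaviour as a thin wrapper; passing -1 simply tells __ip_route_output_key_hash() that the caller has no precomputed multipath hash. Equivalent sketch (wrapper name is illustrative):

static struct rtable *example_output_route(struct net *net, struct flowi4 *fl4)
{
	/* same as __ip_route_output_key(net, fl4) */
	return __ip_route_output_key_hash(net, fl4, -1);
}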
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 18fdb98185ab..2f87c1ba13de 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -122,8 +122,10 @@ struct rtnl_af_ops {
122 int family; 122 int family;
123 123
124 int (*fill_link_af)(struct sk_buff *skb, 124 int (*fill_link_af)(struct sk_buff *skb,
125 const struct net_device *dev); 125 const struct net_device *dev,
126 size_t (*get_link_af_size)(const struct net_device *dev); 126 u32 ext_filter_mask);
127 size_t (*get_link_af_size)(const struct net_device *dev,
128 u32 ext_filter_mask);
127 129
128 int (*validate_link_af)(const struct net_device *dev, 130 int (*validate_link_af)(const struct net_device *dev,
129 const struct nlattr *attr); 131 const struct nlattr *attr);
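Both per-family callbacks now receive the dump's RTEXT_FILTER_* mask, so an address family can size and fill its IFLA_AF_SPEC payload conditionally. A hypothetical skeleton (attribute type and sizes are placeholders; nla_total_size()/nla_put_u32() are the standard netlink helpers):

static size_t example_get_link_af_size(const struct net_device *dev,
				       u32 ext_filter_mask)
{
	/* return 0 here when the mask says this family's data is unwanted */
	return nla_total_size(sizeof(u32));
}

static int example_fill_link_af(struct sk_buff *skb,
				const struct net_device *dev,
				u32 ext_filter_mask)
{
	return nla_put_u32(skb, 1 /* hypothetical attribute */, 0);
}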
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 444faa89a55f..4c79ce8c1f92 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -251,7 +251,7 @@ struct tcf_proto {
251struct qdisc_skb_cb { 251struct qdisc_skb_cb {
252 unsigned int pkt_len; 252 unsigned int pkt_len;
253 u16 slave_dev_queue_mapping; 253 u16 slave_dev_queue_mapping;
254 u16 _pad; 254 u16 tc_classid;
255#define QDISC_CB_PRIV_LEN 20 255#define QDISC_CB_PRIV_LEN 20
256 unsigned char data[QDISC_CB_PRIV_LEN]; 256 unsigned char data[QDISC_CB_PRIV_LEN];
257}; 257};
@@ -402,6 +402,7 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
402 const struct qdisc_size_table *stab); 402 const struct qdisc_size_table *stab);
403bool tcf_destroy(struct tcf_proto *tp, bool force); 403bool tcf_destroy(struct tcf_proto *tp, bool force);
404void tcf_destroy_chain(struct tcf_proto __rcu **fl); 404void tcf_destroy_chain(struct tcf_proto __rcu **fl);
405int skb_do_redirect(struct sk_buff *);
405 406
406/* Reset all TX qdiscs greater then index of a device. */ 407/* Reset all TX qdiscs greater then index of a device. */
407static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) 408static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
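tc_classid reuses the former padding slot in qdisc_skb_cb, letting a classifier hand a class id to the qdisc layer through the skb control block. Minimal assumed usage, relying on the existing qdisc_skb_cb() accessor:

static void example_store_classid(struct sk_buff *skb, u16 classid)
{
	qdisc_skb_cb(skb)->tc_classid = classid;
}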
diff --git a/include/net/sock.h b/include/net/sock.h
index 7aa78440559a..7f89e4ba18d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -150,6 +150,10 @@ typedef __u64 __bitwise __addrpair;
150 * @skc_node: main hash linkage for various protocol lookup tables 150 * @skc_node: main hash linkage for various protocol lookup tables
151 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol 151 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
152 * @skc_tx_queue_mapping: tx queue number for this connection 152 * @skc_tx_queue_mapping: tx queue number for this connection
153 * @skc_flags: place holder for sk_flags
154 * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
155 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
156 * @skc_incoming_cpu: record/match cpu processing incoming packets
153 * @skc_refcnt: reference count 157 * @skc_refcnt: reference count
154 * 158 *
155 * This is the minimal network layer representation of sockets, the header 159 * This is the minimal network layer representation of sockets, the header
@@ -200,6 +204,16 @@ struct sock_common {
200 204
201 atomic64_t skc_cookie; 205 atomic64_t skc_cookie;
202 206
207 /* following fields are padding to force
208 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
209 * assuming IPV6 is enabled. We use this padding differently
210 * for different kind of 'sockets'
211 */
212 union {
213 unsigned long skc_flags;
214 struct sock *skc_listener; /* request_sock */
215 struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
216 };
203 /* 217 /*
204 * fields between dontcopy_begin/dontcopy_end 218 * fields between dontcopy_begin/dontcopy_end
205 * are not copied in sock_copy() 219 * are not copied in sock_copy()
@@ -212,9 +226,20 @@ struct sock_common {
212 struct hlist_nulls_node skc_nulls_node; 226 struct hlist_nulls_node skc_nulls_node;
213 }; 227 };
214 int skc_tx_queue_mapping; 228 int skc_tx_queue_mapping;
229 union {
230 int skc_incoming_cpu;
231 u32 skc_rcv_wnd;
232 u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
233 };
234
215 atomic_t skc_refcnt; 235 atomic_t skc_refcnt;
216 /* private: */ 236 /* private: */
217 int skc_dontcopy_end[0]; 237 int skc_dontcopy_end[0];
238 union {
239 u32 skc_rxhash;
240 u32 skc_window_clamp;
241 u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */
242 };
218 /* public: */ 243 /* public: */
219}; 244};
220 245
@@ -243,8 +268,6 @@ struct cg_proto;
243 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) 268 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
244 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) 269 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
245 * @sk_sndbuf: size of send buffer in bytes 270 * @sk_sndbuf: size of send buffer in bytes
246 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
247 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
248 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets 271 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
249 * @sk_no_check_rx: allow zero checksum in RX packets 272 * @sk_no_check_rx: allow zero checksum in RX packets
250 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 273 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -273,8 +296,6 @@ struct cg_proto;
273 * @sk_rcvlowat: %SO_RCVLOWAT setting 296 * @sk_rcvlowat: %SO_RCVLOWAT setting
274 * @sk_rcvtimeo: %SO_RCVTIMEO setting 297 * @sk_rcvtimeo: %SO_RCVTIMEO setting
275 * @sk_sndtimeo: %SO_SNDTIMEO setting 298 * @sk_sndtimeo: %SO_SNDTIMEO setting
276 * @sk_rxhash: flow hash received from netif layer
277 * @sk_incoming_cpu: record cpu processing incoming packets
278 * @sk_txhash: computed flow hash for use on transmit 299 * @sk_txhash: computed flow hash for use on transmit
279 * @sk_filter: socket filtering instructions 300 * @sk_filter: socket filtering instructions
280 * @sk_timer: sock cleanup timer 301 * @sk_timer: sock cleanup timer
@@ -331,6 +352,9 @@ struct sock {
331#define sk_v6_daddr __sk_common.skc_v6_daddr 352#define sk_v6_daddr __sk_common.skc_v6_daddr
332#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr 353#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
333#define sk_cookie __sk_common.skc_cookie 354#define sk_cookie __sk_common.skc_cookie
355#define sk_incoming_cpu __sk_common.skc_incoming_cpu
356#define sk_flags __sk_common.skc_flags
357#define sk_rxhash __sk_common.skc_rxhash
334 358
335 socket_lock_t sk_lock; 359 socket_lock_t sk_lock;
336 struct sk_buff_head sk_receive_queue; 360 struct sk_buff_head sk_receive_queue;
@@ -350,14 +374,6 @@ struct sock {
350 } sk_backlog; 374 } sk_backlog;
351#define sk_rmem_alloc sk_backlog.rmem_alloc 375#define sk_rmem_alloc sk_backlog.rmem_alloc
352 int sk_forward_alloc; 376 int sk_forward_alloc;
353#ifdef CONFIG_RPS
354 __u32 sk_rxhash;
355#endif
356 u16 sk_incoming_cpu;
357 /* 16bit hole
358 * Warned : sk_incoming_cpu can be set from softirq,
359 * Do not use this hole without fully understanding possible issues.
360 */
361 377
362 __u32 sk_txhash; 378 __u32 sk_txhash;
363#ifdef CONFIG_NET_RX_BUSY_POLL 379#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -373,7 +389,6 @@ struct sock {
373#ifdef CONFIG_XFRM 389#ifdef CONFIG_XFRM
374 struct xfrm_policy *sk_policy[2]; 390 struct xfrm_policy *sk_policy[2];
375#endif 391#endif
376 unsigned long sk_flags;
377 struct dst_entry *sk_rx_dst; 392 struct dst_entry *sk_rx_dst;
378 struct dst_entry __rcu *sk_dst_cache; 393 struct dst_entry __rcu *sk_dst_cache;
379 spinlock_t sk_dst_lock; 394 spinlock_t sk_dst_lock;
@@ -759,7 +774,7 @@ static inline int sk_memalloc_socks(void)
759 774
760#endif 775#endif
761 776
762static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) 777static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
763{ 778{
764 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); 779 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
765} 780}
@@ -828,6 +843,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
828 if (sk_rcvqueues_full(sk, limit)) 843 if (sk_rcvqueues_full(sk, limit))
829 return -ENOBUFS; 844 return -ENOBUFS;
830 845
846 /*
847 * If the skb was allocated from pfmemalloc reserves, only
848 * allow SOCK_MEMALLOC sockets to use it as this socket is
849 * helping free memory
850 */
851 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
852 return -ENOMEM;
853
831 __sk_add_backlog(sk, skb); 854 __sk_add_backlog(sk, skb);
832 sk->sk_backlog.len += skb->truesize; 855 sk->sk_backlog.len += skb->truesize;
833 return 0; 856 return 0;
@@ -1514,6 +1537,13 @@ void sock_kfree_s(struct sock *sk, void *mem, int size);
1514void sock_kzfree_s(struct sock *sk, void *mem, int size); 1537void sock_kzfree_s(struct sock *sk, void *mem, int size);
1515void sk_send_sigurg(struct sock *sk); 1538void sk_send_sigurg(struct sock *sk);
1516 1539
1540struct sockcm_cookie {
1541 u32 mark;
1542};
1543
1544int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1545 struct sockcm_cookie *sockc);
1546
1517/* 1547/*
1518 * Functions to fill in entries in struct proto_ops when a protocol 1548 * Functions to fill in entries in struct proto_ops when a protocol
1519 * does not implement a particular function. 1549 * does not implement a particular function.
@@ -1654,12 +1684,16 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
1654kuid_t sock_i_uid(struct sock *sk); 1684kuid_t sock_i_uid(struct sock *sk);
1655unsigned long sock_i_ino(struct sock *sk); 1685unsigned long sock_i_ino(struct sock *sk);
1656 1686
1657static inline void sk_set_txhash(struct sock *sk) 1687static inline u32 net_tx_rndhash(void)
1658{ 1688{
1659 sk->sk_txhash = prandom_u32(); 1689 u32 v = prandom_u32();
1660 1690
1661 if (unlikely(!sk->sk_txhash)) 1691 return v ?: 1;
1662 sk->sk_txhash = 1; 1692}
1693
1694static inline void sk_set_txhash(struct sock *sk)
1695{
1696 sk->sk_txhash = net_tx_rndhash();
1663} 1697}
1664 1698
1665static inline void sk_rethink_txhash(struct sock *sk) 1699static inline void sk_rethink_txhash(struct sock *sk)
@@ -1917,6 +1951,8 @@ static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
1917 } 1951 }
1918} 1952}
1919 1953
1954void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
1955
1920/* 1956/*
1921 * Queue a received datagram if it will fit. Stream and sequenced 1957 * Queue a received datagram if it will fit. Stream and sequenced
1922 * protocols can't normally use this as they need to fit buffers in 1958 * protocols can't normally use this as they need to fit buffers in
@@ -1925,21 +1961,6 @@ static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
1925 * Inlined as it's very short and called for pretty much every 1961 * Inlined as it's very short and called for pretty much every
1926 * packet ever received. 1962 * packet ever received.
1927 */ 1963 */
1928
1929static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1930{
1931 skb_orphan(skb);
1932 skb->sk = sk;
1933 skb->destructor = sock_wfree;
1934 skb_set_hash_from_sk(skb, sk);
1935 /*
1936 * We used to take a refcount on sk, but following operation
1937 * is enough to guarantee sk_free() wont free this sock until
1938 * all in-flight packets are completed
1939 */
1940 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1941}
1942
1943static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 1964static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
1944{ 1965{
1945 skb_orphan(skb); 1966 skb_orphan(skb);
@@ -2020,7 +2041,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2020 */ 2041 */
2021static inline struct page_frag *sk_page_frag(struct sock *sk) 2042static inline struct page_frag *sk_page_frag(struct sock *sk)
2022{ 2043{
2023 if (sk->sk_allocation & __GFP_WAIT) 2044 if (gfpflags_allow_blocking(sk->sk_allocation))
2024 return &current->task_frag; 2045 return &current->task_frag;
2025 2046
2026 return &sk->sk_frag; 2047 return &sk->sk_frag;
@@ -2197,6 +2218,39 @@ static inline bool sk_fullsock(const struct sock *sk)
2197 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); 2218 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2198} 2219}
2199 2220
2221/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
2222 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
2223 */
2224static inline bool sk_listener(const struct sock *sk)
2225{
2226 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2227}
2228
2229/**
2230 * sk_state_load - read sk->sk_state for lockless contexts
2231 * @sk: socket pointer
2232 *
2233 * Paired with sk_state_store(). Used in places we do not hold socket lock :
2234 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
2235 */
2236static inline int sk_state_load(const struct sock *sk)
2237{
2238 return smp_load_acquire(&sk->sk_state);
2239}
2240
2241/**
2242 * sk_state_store - update sk->sk_state
2243 * @sk: socket pointer
2244 * @newstate: new state
2245 *
2246 * Paired with sk_state_load(). Should be used in contexts where
2247 * state change might impact lockless readers.
2248 */
2249static inline void sk_state_store(struct sock *sk, int newstate)
2250{
2251 smp_store_release(&sk->sk_state, newstate);
2252}
2253
2200void sock_enable_timestamp(struct sock *sk, int flag); 2254void sock_enable_timestamp(struct sock *sk, int flag);
2201int sock_get_timestamp(struct sock *, struct timeval __user *); 2255int sock_get_timestamp(struct sock *, struct timeval __user *);
2202int sock_get_timestampns(struct sock *, struct timespec __user *); 2256int sock_get_timestampns(struct sock *, struct timespec __user *);
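As the kernel-doc above spells out, lockless readers pair sk_state_load() with sk_state_store() on the writer side. A minimal sketch with invented function names, using the usual TCP state constant:

static bool example_lockless_is_established(const struct sock *sk)
{
	return sk_state_load(sk) == TCP_ESTABLISHED;
}

static void example_set_established(struct sock *sk)
{
	sk_state_store(sk, TCP_ESTABLISHED);
}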
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 319baab3b48e..1d22ce9f352e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * include/net/switchdev.h - Switch device API 2 * include/net/switchdev.h - Switch device API
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> 3 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com> 4 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
@@ -13,70 +13,109 @@
13 13
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/notifier.h> 15#include <linux/notifier.h>
16#include <linux/list.h>
17#include <net/ip_fib.h>
16 18
17#define SWITCHDEV_F_NO_RECURSE BIT(0) 19#define SWITCHDEV_F_NO_RECURSE BIT(0)
20#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1)
21#define SWITCHDEV_F_DEFER BIT(2)
18 22
19enum switchdev_trans { 23struct switchdev_trans_item {
20 SWITCHDEV_TRANS_NONE, 24 struct list_head list;
21 SWITCHDEV_TRANS_PREPARE, 25 void *data;
22 SWITCHDEV_TRANS_ABORT, 26 void (*destructor)(const void *data);
23 SWITCHDEV_TRANS_COMMIT,
24}; 27};
25 28
29struct switchdev_trans {
30 struct list_head item_list;
31 bool ph_prepare;
32};
33
34static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
35{
36 return trans && trans->ph_prepare;
37}
38
39static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
40{
41 return trans && !trans->ph_prepare;
42}
43
26enum switchdev_attr_id { 44enum switchdev_attr_id {
27 SWITCHDEV_ATTR_UNDEFINED, 45 SWITCHDEV_ATTR_ID_UNDEFINED,
28 SWITCHDEV_ATTR_PORT_PARENT_ID, 46 SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
29 SWITCHDEV_ATTR_PORT_STP_STATE, 47 SWITCHDEV_ATTR_ID_PORT_STP_STATE,
30 SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS, 48 SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
49 SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
31}; 50};
32 51
33struct switchdev_attr { 52struct switchdev_attr {
34 enum switchdev_attr_id id; 53 enum switchdev_attr_id id;
35 enum switchdev_trans trans;
36 u32 flags; 54 u32 flags;
37 union { 55 union {
38 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 56 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
39 u8 stp_state; /* PORT_STP_STATE */ 57 u8 stp_state; /* PORT_STP_STATE */
40 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ 58 unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
59 u32 ageing_time; /* BRIDGE_AGEING_TIME */
41 } u; 60 } u;
42}; 61};
43 62
44struct fib_info;
45
46enum switchdev_obj_id { 63enum switchdev_obj_id {
47 SWITCHDEV_OBJ_UNDEFINED, 64 SWITCHDEV_OBJ_ID_UNDEFINED,
48 SWITCHDEV_OBJ_PORT_VLAN, 65 SWITCHDEV_OBJ_ID_PORT_VLAN,
49 SWITCHDEV_OBJ_IPV4_FIB, 66 SWITCHDEV_OBJ_ID_IPV4_FIB,
50 SWITCHDEV_OBJ_PORT_FDB, 67 SWITCHDEV_OBJ_ID_PORT_FDB,
51}; 68};
52 69
53struct switchdev_obj { 70struct switchdev_obj {
54 enum switchdev_obj_id id; 71 enum switchdev_obj_id id;
55 enum switchdev_trans trans; 72 u32 flags;
56 int (*cb)(struct net_device *dev, struct switchdev_obj *obj); 73};
57 union { 74
58 struct switchdev_obj_vlan { /* PORT_VLAN */ 75/* SWITCHDEV_OBJ_ID_PORT_VLAN */
59 u16 flags; 76struct switchdev_obj_port_vlan {
60 u16 vid_begin; 77 struct switchdev_obj obj;
61 u16 vid_end; 78 u16 flags;
62 } vlan; 79 u16 vid_begin;
63 struct switchdev_obj_ipv4_fib { /* IPV4_FIB */ 80 u16 vid_end;
64 u32 dst; 81};
65 int dst_len; 82
66 struct fib_info *fi; 83#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
67 u8 tos; 84 container_of(obj, struct switchdev_obj_port_vlan, obj)
68 u8 type; 85
69 u32 nlflags; 86/* SWITCHDEV_OBJ_ID_IPV4_FIB */
70 u32 tb_id; 87struct switchdev_obj_ipv4_fib {
71 } ipv4_fib; 88 struct switchdev_obj obj;
72 struct switchdev_obj_fdb { /* PORT_FDB */ 89 u32 dst;
73 const unsigned char *addr; 90 int dst_len;
74 u16 vid; 91 struct fib_info fi;
75 u16 ndm_state; 92 u8 tos;
76 } fdb; 93 u8 type;
77 } u; 94 u32 nlflags;
95 u32 tb_id;
96};
97
98#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
99 container_of(obj, struct switchdev_obj_ipv4_fib, obj)
100
101/* SWITCHDEV_OBJ_ID_PORT_FDB */
102struct switchdev_obj_port_fdb {
103 struct switchdev_obj obj;
104 unsigned char addr[ETH_ALEN];
105 u16 vid;
106 u16 ndm_state;
78}; 107};
79 108
109#define SWITCHDEV_OBJ_PORT_FDB(obj) \
110 container_of(obj, struct switchdev_obj_port_fdb, obj)
111
112void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
113 void *data, void (*destructor)(void const *),
114 struct switchdev_trans_item *tritem);
115void *switchdev_trans_item_dequeue(struct switchdev_trans *trans);
116
117typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
118
80/** 119/**
81 * struct switchdev_ops - switchdev operations 120 * struct switchdev_ops - switchdev operations
82 * 121 *
@@ -84,23 +123,26 @@ struct switchdev_obj {
84 * 123 *
85 * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr). 124 * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
86 * 125 *
87 * @switchdev_port_obj_add: Add an object to port (see switchdev_obj). 126 * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
88 * 127 *
89 * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj). 128 * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
90 * 129 *
91 * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj). 130 * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*).
92 */ 131 */
93struct switchdev_ops { 132struct switchdev_ops {
94 int (*switchdev_port_attr_get)(struct net_device *dev, 133 int (*switchdev_port_attr_get)(struct net_device *dev,
95 struct switchdev_attr *attr); 134 struct switchdev_attr *attr);
96 int (*switchdev_port_attr_set)(struct net_device *dev, 135 int (*switchdev_port_attr_set)(struct net_device *dev,
97 struct switchdev_attr *attr); 136 const struct switchdev_attr *attr,
137 struct switchdev_trans *trans);
98 int (*switchdev_port_obj_add)(struct net_device *dev, 138 int (*switchdev_port_obj_add)(struct net_device *dev,
99 struct switchdev_obj *obj); 139 const struct switchdev_obj *obj,
140 struct switchdev_trans *trans);
100 int (*switchdev_port_obj_del)(struct net_device *dev, 141 int (*switchdev_port_obj_del)(struct net_device *dev,
101 struct switchdev_obj *obj); 142 const struct switchdev_obj *obj);
102 int (*switchdev_port_obj_dump)(struct net_device *dev, 143 int (*switchdev_port_obj_dump)(struct net_device *dev,
103 struct switchdev_obj *obj); 144 struct switchdev_obj *obj,
145 switchdev_obj_dump_cb_t *cb);
104}; 146};
105 147
106enum switchdev_notifier_type { 148enum switchdev_notifier_type {
@@ -126,13 +168,17 @@ switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
126 168
127#ifdef CONFIG_NET_SWITCHDEV 169#ifdef CONFIG_NET_SWITCHDEV
128 170
171void switchdev_deferred_process(void);
129int switchdev_port_attr_get(struct net_device *dev, 172int switchdev_port_attr_get(struct net_device *dev,
130 struct switchdev_attr *attr); 173 struct switchdev_attr *attr);
131int switchdev_port_attr_set(struct net_device *dev, 174int switchdev_port_attr_set(struct net_device *dev,
132 struct switchdev_attr *attr); 175 const struct switchdev_attr *attr);
133int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj); 176int switchdev_port_obj_add(struct net_device *dev,
134int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj); 177 const struct switchdev_obj *obj);
135int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj); 178int switchdev_port_obj_del(struct net_device *dev,
179 const struct switchdev_obj *obj);
180int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
181 switchdev_obj_dump_cb_t *cb);
136int register_switchdev_notifier(struct notifier_block *nb); 182int register_switchdev_notifier(struct notifier_block *nb);
137int unregister_switchdev_notifier(struct notifier_block *nb); 183int unregister_switchdev_notifier(struct notifier_block *nb);
138int call_switchdev_notifiers(unsigned long val, struct net_device *dev, 184int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
@@ -164,6 +210,10 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
164 210
165#else 211#else
166 212
213static inline void switchdev_deferred_process(void)
214{
215}
216
167static inline int switchdev_port_attr_get(struct net_device *dev, 217static inline int switchdev_port_attr_get(struct net_device *dev,
168 struct switchdev_attr *attr) 218 struct switchdev_attr *attr)
169{ 219{
@@ -171,25 +221,26 @@ static inline int switchdev_port_attr_get(struct net_device *dev,
171} 221}
172 222
173static inline int switchdev_port_attr_set(struct net_device *dev, 223static inline int switchdev_port_attr_set(struct net_device *dev,
174 struct switchdev_attr *attr) 224 const struct switchdev_attr *attr)
175{ 225{
176 return -EOPNOTSUPP; 226 return -EOPNOTSUPP;
177} 227}
178 228
179static inline int switchdev_port_obj_add(struct net_device *dev, 229static inline int switchdev_port_obj_add(struct net_device *dev,
180 struct switchdev_obj *obj) 230 const struct switchdev_obj *obj)
181{ 231{
182 return -EOPNOTSUPP; 232 return -EOPNOTSUPP;
183} 233}
184 234
185static inline int switchdev_port_obj_del(struct net_device *dev, 235static inline int switchdev_port_obj_del(struct net_device *dev,
186 struct switchdev_obj *obj) 236 const struct switchdev_obj *obj)
187{ 237{
188 return -EOPNOTSUPP; 238 return -EOPNOTSUPP;
189} 239}
190 240
191static inline int switchdev_port_obj_dump(struct net_device *dev, 241static inline int switchdev_port_obj_dump(struct net_device *dev,
192 struct switchdev_obj *obj) 242 const struct switchdev_obj *obj,
243 switchdev_obj_dump_cb_t *cb)
193{ 244{
194 return -EOPNOTSUPP; 245 return -EOPNOTSUPP;
195} 246}
@@ -272,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
272 struct net_device *filter_dev, 323 struct net_device *filter_dev,
273 int idx) 324 int idx)
274{ 325{
275 return -EOPNOTSUPP; 326 return idx;
276} 327}
277 328
278static inline void switchdev_port_fwd_mark_set(struct net_device *dev, 329static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
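
Note on the switchdev changes above: port_attr_set and port_obj_add now take a struct switchdev_trans, i.e. drivers see the operation twice, once in a prepare phase and once in a commit phase. A minimal sketch of how a driver hook might honour that, assuming the switchdev_trans_ph_prepare() helper from this header; foo_can_set() and foo_hw_set() are invented driver helpers, not part of the API:

static int foo_port_attr_set(struct net_device *dev,
			     const struct switchdev_attr *attr,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		/* phase 1: only check that the attr can be honoured;
		 * an error here aborts the whole transaction */
		return foo_can_set(dev, attr);	/* foo_can_set() is invented */

	/* phase 2 (commit): push to hardware; this phase must not fail */
	foo_hw_set(dev, attr);			/* foo_hw_set() is invented */
	return 0;
}
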
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index 5c1104c2e24f..02caa406611b 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -5,6 +5,7 @@
5 5
6struct tcf_connmark_info { 6struct tcf_connmark_info {
7 struct tcf_common common; 7 struct tcf_common common;
8 struct net *net;
8 u16 zone; 9 u16 zone;
9}; 10};
10 11
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0cab28cd43a9..f80e74c5ad18 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -279,6 +279,7 @@ extern int sysctl_tcp_limit_output_bytes;
279extern int sysctl_tcp_challenge_ack_limit; 279extern int sysctl_tcp_challenge_ack_limit;
280extern unsigned int sysctl_tcp_notsent_lowat; 280extern unsigned int sysctl_tcp_notsent_lowat;
281extern int sysctl_tcp_min_tso_segs; 281extern int sysctl_tcp_min_tso_segs;
282extern int sysctl_tcp_min_rtt_wlen;
282extern int sysctl_tcp_autocorking; 283extern int sysctl_tcp_autocorking;
283extern int sysctl_tcp_invalid_ratelimit; 284extern int sysctl_tcp_invalid_ratelimit;
284extern int sysctl_tcp_pacing_ss_ratio; 285extern int sysctl_tcp_pacing_ss_ratio;
@@ -365,8 +366,7 @@ void tcp_wfree(struct sk_buff *skb);
365void tcp_write_timer_handler(struct sock *sk); 366void tcp_write_timer_handler(struct sock *sk);
366void tcp_delack_timer_handler(struct sock *sk); 367void tcp_delack_timer_handler(struct sock *sk);
367int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); 368int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
368int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 369int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
369 const struct tcphdr *th, unsigned int len);
370void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 370void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
371 const struct tcphdr *th, unsigned int len); 371 const struct tcphdr *th, unsigned int len);
372void tcp_rcv_space_adjust(struct sock *sk); 372void tcp_rcv_space_adjust(struct sock *sk);
@@ -451,19 +451,22 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
451void tcp_v4_mtu_reduced(struct sock *sk); 451void tcp_v4_mtu_reduced(struct sock *sk);
452void tcp_req_err(struct sock *sk, u32 seq); 452void tcp_req_err(struct sock *sk, u32 seq);
453int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); 453int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
454struct sock *tcp_create_openreq_child(struct sock *sk, 454struct sock *tcp_create_openreq_child(const struct sock *sk,
455 struct request_sock *req, 455 struct request_sock *req,
456 struct sk_buff *skb); 456 struct sk_buff *skb);
457void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst); 457void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
458struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, 458struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
459 struct request_sock *req, 459 struct request_sock *req,
460 struct dst_entry *dst); 460 struct dst_entry *dst,
461 struct request_sock *req_unhash,
462 bool *own_req);
461int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 463int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
462int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 464int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
463int tcp_connect(struct sock *sk); 465int tcp_connect(struct sock *sk);
464struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 466struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
465 struct request_sock *req, 467 struct request_sock *req,
466 struct tcp_fastopen_cookie *foc); 468 struct tcp_fastopen_cookie *foc,
469 bool attach_req);
467int tcp_disconnect(struct sock *sk, int flags); 470int tcp_disconnect(struct sock *sk, int flags);
468 471
469void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); 472void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -492,8 +495,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
492 495
493/* syncookies: remember time of last synqueue overflow 496/* syncookies: remember time of last synqueue overflow
494 * But do not dirty this field too often (once per second is enough) 497 * But do not dirty this field too often (once per second is enough)
498 * It is racy as we do not hold a lock, but the race is very minor.
495 */ 499 */
496static inline void tcp_synq_overflow(struct sock *sk) 500static inline void tcp_synq_overflow(const struct sock *sk)
497{ 501{
498 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 502 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
499 unsigned long now = jiffies; 503 unsigned long now = jiffies;
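
The comment above documents a deliberately lockless, once-per-second update of ts_recent_stamp. The rest of the function body is elided by this hunk; a sketch of the throttle it describes, continuing from the two declarations shown and assuming the usual jiffies comparison rather than quoting the exact source:

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;	/* racy but harmless */
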
@@ -520,8 +524,7 @@ static inline u32 tcp_cookie_time(void)
520 524
521u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, 525u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
522 u16 *mssp); 526 u16 *mssp);
523__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, 527__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
524 __u16 *mss);
525__u32 cookie_init_timestamp(struct request_sock *req); 528__u32 cookie_init_timestamp(struct request_sock *req);
526bool cookie_timestamp_decode(struct tcp_options_received *opt); 529bool cookie_timestamp_decode(struct tcp_options_received *opt);
527bool cookie_ecn_ok(const struct tcp_options_received *opt, 530bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -534,8 +537,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
534 537
535u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, 538u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
536 const struct tcphdr *th, u16 *mssp); 539 const struct tcphdr *th, u16 *mssp);
537__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, 540__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
538 __u16 *mss);
539#endif 541#endif
540/* tcp_output.c */ 542/* tcp_output.c */
541 543
@@ -565,7 +567,9 @@ bool tcp_schedule_loss_probe(struct sock *sk);
565/* tcp_input.c */ 567/* tcp_input.c */
566void tcp_resume_early_retransmit(struct sock *sk); 568void tcp_resume_early_retransmit(struct sock *sk);
567void tcp_rearm_rto(struct sock *sk); 569void tcp_rearm_rto(struct sock *sk);
570void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
568void tcp_reset(struct sock *sk); 571void tcp_reset(struct sock *sk);
572void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
569 573
570/* tcp_timer.c */ 574/* tcp_timer.c */
571void tcp_init_xmit_timers(struct sock *); 575void tcp_init_xmit_timers(struct sock *);
@@ -671,6 +675,12 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
671 return dst_metric_locked(dst, RTAX_CC_ALGO); 675 return dst_metric_locked(dst, RTAX_CC_ALGO);
672} 676}
673 677
678/* Minimum RTT in usec. ~0 means not available. */
679static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
680{
681 return tp->rtt_min[0].rtt;
682}
683
674/* Compute the actual receive window we are currently advertising. 684/* Compute the actual receive window we are currently advertising.
675 * Rcv_nxt can be after the window if our peer pushes more data 685 * Rcv_nxt can be after the window if our peer pushes more data
676 * than the offered window. 686 * than the offered window.
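
tcp_min_rtt() above returns the windowed minimum RTT in microseconds, with ~0 meaning no sample has been taken yet. A hypothetical caller; the wrapper and the 2x timeout heuristic are invented for illustration:

static unsigned long foo_probe_timeout(const struct sock *sk)
{
	u32 min_rtt_us = tcp_min_rtt(tcp_sk(sk));

	if (min_rtt_us == ~0U)
		return 0;	/* no RTT sample yet, caller falls back to a default */
	return usecs_to_jiffies(2 * min_rtt_us);
}
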
@@ -1206,7 +1216,8 @@ static inline int tcp_full_space(const struct sock *sk)
1206} 1216}
1207 1217
1208extern void tcp_openreq_init_rwin(struct request_sock *req, 1218extern void tcp_openreq_init_rwin(struct request_sock *req,
1209 struct sock *sk, struct dst_entry *dst); 1219 const struct sock *sk_listener,
1220 const struct dst_entry *dst);
1210 1221
1211void tcp_enter_memory_pressure(struct sock *sk); 1222void tcp_enter_memory_pressure(struct sock *sk);
1212 1223
@@ -1370,16 +1381,16 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1370 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp); 1381 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1371int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 1382int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1372 int family); 1383 int family);
1373struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, 1384struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1374 const struct sock *addr_sk); 1385 const struct sock *addr_sk);
1375 1386
1376#ifdef CONFIG_TCP_MD5SIG 1387#ifdef CONFIG_TCP_MD5SIG
1377struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, 1388struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1378 const union tcp_md5_addr *addr, 1389 const union tcp_md5_addr *addr,
1379 int family); 1390 int family);
1380#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) 1391#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1381#else 1392#else
1382static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, 1393static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1383 const union tcp_md5_addr *addr, 1394 const union tcp_md5_addr *addr,
1384 int family) 1395 int family)
1385{ 1396{
@@ -1420,10 +1431,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
1420 1431
1421extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; 1432extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1422int tcp_fastopen_reset_cipher(void *key, unsigned int len); 1433int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1423bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 1434struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1424 struct request_sock *req, 1435 struct request_sock *req,
1425 struct tcp_fastopen_cookie *foc, 1436 struct tcp_fastopen_cookie *foc,
1426 struct dst_entry *dst); 1437 struct dst_entry *dst);
1427void tcp_fastopen_init_key_once(bool publish); 1438void tcp_fastopen_init_key_once(bool publish);
1428#define TCP_FASTOPEN_KEY_LENGTH 16 1439#define TCP_FASTOPEN_KEY_LENGTH 16
1429 1440
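
tcp_try_fastopen() now hands back the child socket instead of a bool. A hedged sketch of a listener-side caller; foo_queue_child() and the wrapper are invented:

static bool foo_try_fastopen(struct sock *sk, struct sk_buff *skb,
			     struct request_sock *req,
			     struct tcp_fastopen_cookie *foc,
			     struct dst_entry *dst)
{
	struct sock *child = tcp_try_fastopen(sk, skb, req, foc, dst);

	if (!child)
		return false;		/* fall back to the normal 3WHS path */
	/* fastopen accepted data and created the child; hand it onwards */
	foo_queue_child(sk, child);	/* foo_queue_child() is invented */
	return true;
}
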
@@ -1618,7 +1629,6 @@ static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1618/* /proc */ 1629/* /proc */
1619enum tcp_seq_states { 1630enum tcp_seq_states {
1620 TCP_SEQ_STATE_LISTENING, 1631 TCP_SEQ_STATE_LISTENING,
1621 TCP_SEQ_STATE_OPENREQ,
1622 TCP_SEQ_STATE_ESTABLISHED, 1632 TCP_SEQ_STATE_ESTABLISHED,
1623}; 1633};
1624 1634
@@ -1637,7 +1647,6 @@ struct tcp_iter_state {
1637 enum tcp_seq_states state; 1647 enum tcp_seq_states state;
1638 struct sock *syn_wait_sk; 1648 struct sock *syn_wait_sk;
1639 int bucket, offset, sbucket, num; 1649 int bucket, offset, sbucket, num;
1640 kuid_t uid;
1641 loff_t last_pos; 1650 loff_t last_pos;
1642}; 1651};
1643 1652
@@ -1674,7 +1683,7 @@ int tcp4_proc_init(void);
1674void tcp4_proc_exit(void); 1683void tcp4_proc_exit(void);
1675#endif 1684#endif
1676 1685
1677int tcp_rtx_synack(struct sock *sk, struct request_sock *req); 1686int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1678int tcp_conn_request(struct request_sock_ops *rsk_ops, 1687int tcp_conn_request(struct request_sock_ops *rsk_ops,
1679 const struct tcp_request_sock_ops *af_ops, 1688 const struct tcp_request_sock_ops *af_ops,
1680 struct sock *sk, struct sk_buff *skb); 1689 struct sock *sk, struct sk_buff *skb);
@@ -1682,7 +1691,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
1682/* TCP af-specific functions */ 1691/* TCP af-specific functions */
1683struct tcp_sock_af_ops { 1692struct tcp_sock_af_ops {
1684#ifdef CONFIG_TCP_MD5SIG 1693#ifdef CONFIG_TCP_MD5SIG
1685 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, 1694 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
1686 const struct sock *addr_sk); 1695 const struct sock *addr_sk);
1687 int (*calc_md5_hash)(char *location, 1696 int (*calc_md5_hash)(char *location,
1688 const struct tcp_md5sig_key *md5, 1697 const struct tcp_md5sig_key *md5,
@@ -1697,40 +1706,42 @@ struct tcp_sock_af_ops {
1697struct tcp_request_sock_ops { 1706struct tcp_request_sock_ops {
1698 u16 mss_clamp; 1707 u16 mss_clamp;
1699#ifdef CONFIG_TCP_MD5SIG 1708#ifdef CONFIG_TCP_MD5SIG
1700 struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk, 1709 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1701 const struct sock *addr_sk); 1710 const struct sock *addr_sk);
1702 int (*calc_md5_hash) (char *location, 1711 int (*calc_md5_hash) (char *location,
1703 const struct tcp_md5sig_key *md5, 1712 const struct tcp_md5sig_key *md5,
1704 const struct sock *sk, 1713 const struct sock *sk,
1705 const struct sk_buff *skb); 1714 const struct sk_buff *skb);
1706#endif 1715#endif
1707 void (*init_req)(struct request_sock *req, struct sock *sk, 1716 void (*init_req)(struct request_sock *req,
1717 const struct sock *sk_listener,
1708 struct sk_buff *skb); 1718 struct sk_buff *skb);
1709#ifdef CONFIG_SYN_COOKIES 1719#ifdef CONFIG_SYN_COOKIES
1710 __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, 1720 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
1711 __u16 *mss); 1721 __u16 *mss);
1712#endif 1722#endif
1713 struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl, 1723 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1714 const struct request_sock *req, 1724 const struct request_sock *req,
1715 bool *strict); 1725 bool *strict);
1716 __u32 (*init_seq)(const struct sk_buff *skb); 1726 __u32 (*init_seq)(const struct sk_buff *skb);
1717 int (*send_synack)(struct sock *sk, struct dst_entry *dst, 1727 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1718 struct flowi *fl, struct request_sock *req, 1728 struct flowi *fl, struct request_sock *req,
1719 u16 queue_mapping, struct tcp_fastopen_cookie *foc); 1729 struct tcp_fastopen_cookie *foc,
1720 void (*queue_hash_add)(struct sock *sk, struct request_sock *req, 1730 bool attach_req);
1721 const unsigned long timeout);
1722}; 1731};
1723 1732
1724#ifdef CONFIG_SYN_COOKIES 1733#ifdef CONFIG_SYN_COOKIES
1725static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 1734static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1726 struct sock *sk, struct sk_buff *skb, 1735 const struct sock *sk, struct sk_buff *skb,
1727 __u16 *mss) 1736 __u16 *mss)
1728{ 1737{
1729 return ops->cookie_init_seq(sk, skb, mss); 1738 tcp_synq_overflow(sk);
1739 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1740 return ops->cookie_init_seq(skb, mss);
1730} 1741}
1731#else 1742#else
1732static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 1743static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1733 struct sock *sk, struct sk_buff *skb, 1744 const struct sock *sk, struct sk_buff *skb,
1734 __u16 *mss) 1745 __u16 *mss)
1735{ 1746{
1736 return 0; 1747 return 0;
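
With the listener socket dropped from cookie_init_seq, an address-family ops table can point the hook straight at the per-family helper declared earlier in this header. A partial initializer for illustration only; the _sketch name is hypothetical and the real tables live in tcp_ipv4.c/tcp_ipv6.c:

#ifdef CONFIG_SYN_COOKIES
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops_sketch = {
	.mss_clamp	 = TCP_MSS_DEFAULT,
	.cookie_init_seq = cookie_v4_init_sequence,	/* now takes (skb, mss) only */
};
#endif
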
@@ -1742,6 +1753,19 @@ int tcpv4_offload_init(void);
1742void tcp_v4_init(void); 1753void tcp_v4_init(void);
1743void tcp_init(void); 1754void tcp_init(void);
1744 1755
1756/* tcp_recovery.c */
1757
1758/* Flags to enable various loss recovery features. See below */
1759extern int sysctl_tcp_recovery;
1760
1761/* Use TCP RACK to detect (some) tail and retransmit losses */
1762#define TCP_RACK_LOST_RETRANS 0x1
1763
1764extern int tcp_rack_mark_lost(struct sock *sk);
1765
1766extern void tcp_rack_advance(struct tcp_sock *tp,
1767 const struct skb_mstamp *xmit_time, u8 sacked);
1768
1745/* 1769/*
1746 * Save and compile IPv4 options, return a pointer to it 1770 * Save and compile IPv4 options, return a pointer to it
1747 */ 1771 */
diff --git a/include/net/tso.h b/include/net/tso.h
index 47e5444f7d15..b7be852bfe9d 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -8,6 +8,7 @@ struct tso_t {
8 void *data; 8 void *data;
9 size_t size; 9 size_t size;
10 u16 ip_id; 10 u16 ip_id;
11 bool ipv6;
11 u32 tcp_seq; 12 u32 tcp_seq;
12}; 13};
13 14
diff --git a/include/net/vrf.h b/include/net/vrf.h
deleted file mode 100644
index 593e6094ddd4..000000000000
--- a/include/net/vrf.h
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * include/net/net_vrf.h - adds vrf dev structure definitions
3 * Copyright (c) 2015 Cumulus Networks
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __LINUX_NET_VRF_H
12#define __LINUX_NET_VRF_H
13
14struct net_vrf_dev {
15 struct rcu_head rcu;
16 int ifindex; /* ifindex of master dev */
17 u32 tb_id; /* table id for VRF */
18};
19
20struct slave {
21 struct list_head list;
22 struct net_device *dev;
23};
24
25struct slave_queue {
26 struct list_head all_slaves;
27};
28
29struct net_vrf {
30 struct slave_queue queue;
31 struct rtable *rth;
32 u32 tb_id;
33};
34
35
36#if IS_ENABLED(CONFIG_NET_VRF)
37/* called with rcu_read_lock() */
38static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
39{
40 struct net_vrf_dev *vrf_ptr;
41 int ifindex = 0;
42
43 if (!dev)
44 return 0;
45
46 if (netif_is_vrf(dev)) {
47 ifindex = dev->ifindex;
48 } else {
49 vrf_ptr = rcu_dereference(dev->vrf_ptr);
50 if (vrf_ptr)
51 ifindex = vrf_ptr->ifindex;
52 }
53
54 return ifindex;
55}
56
57static inline int vrf_master_ifindex(const struct net_device *dev)
58{
59 int ifindex;
60
61 rcu_read_lock();
62 ifindex = vrf_master_ifindex_rcu(dev);
63 rcu_read_unlock();
64
65 return ifindex;
66}
67
68/* called with rcu_read_lock */
69static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
70{
71 u32 tb_id = 0;
72
73 if (dev) {
74 struct net_vrf_dev *vrf_ptr;
75
76 vrf_ptr = rcu_dereference(dev->vrf_ptr);
77 if (vrf_ptr)
78 tb_id = vrf_ptr->tb_id;
79 }
80 return tb_id;
81}
82
83static inline u32 vrf_dev_table(const struct net_device *dev)
84{
85 u32 tb_id;
86
87 rcu_read_lock();
88 tb_id = vrf_dev_table_rcu(dev);
89 rcu_read_unlock();
90
91 return tb_id;
92}
93
94static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
95{
96 struct net_device *dev;
97 u32 tb_id = 0;
98
99 if (!ifindex)
100 return 0;
101
102 rcu_read_lock();
103
104 dev = dev_get_by_index_rcu(net, ifindex);
105 if (dev)
106 tb_id = vrf_dev_table_rcu(dev);
107
108 rcu_read_unlock();
109
110 return tb_id;
111}
112
113/* called with rtnl */
114static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
115{
116 u32 tb_id = 0;
117
118 if (dev) {
119 struct net_vrf_dev *vrf_ptr;
120
121 vrf_ptr = rtnl_dereference(dev->vrf_ptr);
122 if (vrf_ptr)
123 tb_id = vrf_ptr->tb_id;
124 }
125 return tb_id;
126}
127
128/* caller has already checked netif_is_vrf(dev) */
129static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
130{
131 struct rtable *rth = ERR_PTR(-ENETUNREACH);
132 struct net_vrf *vrf = netdev_priv(dev);
133
134 if (vrf) {
135 rth = vrf->rth;
136 atomic_inc(&rth->dst.__refcnt);
137 }
138 return rth;
139}
140
141#else
142static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
143{
144 return 0;
145}
146
147static inline int vrf_master_ifindex(const struct net_device *dev)
148{
149 return 0;
150}
151
152static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
153{
154 return 0;
155}
156
157static inline u32 vrf_dev_table(const struct net_device *dev)
158{
159 return 0;
160}
161
162static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
163{
164 return 0;
165}
166
167static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
168{
169 return 0;
170}
171
172static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
173{
174 return ERR_PTR(-ENETUNREACH);
175}
176#endif
177
178#endif /* __LINUX_NET_VRF_H */
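
include/net/vrf.h is deleted wholesale; the per-device VRF lookups it provided are superseded by the l3mdev API introduced elsewhere in this merge. A hedged mapping, assuming l3mdev_fib_table_rcu() and l3mdev_master_ifindex_rcu() keep the semantics of the removed helpers; the wrapper is invented:

/* was: vrf_dev_table_rcu(dev); likewise vrf_master_ifindex_rcu(dev) maps to
 * l3mdev_master_ifindex_rcu(dev) */
static u32 foo_dev_table_rcu(const struct net_device *dev)
{
	return dev ? l3mdev_fib_table_rcu(dev) : 0;
}
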
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 480a319b4c92..c1c899c3a51b 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -152,7 +152,10 @@ struct vxlan_config {
152struct vxlan_dev { 152struct vxlan_dev {
153 struct hlist_node hlist; /* vni hash table */ 153 struct hlist_node hlist; /* vni hash table */
154 struct list_head next; /* vxlan's per namespace list */ 154 struct list_head next; /* vxlan's per namespace list */
155 struct vxlan_sock *vn_sock; /* listening socket */ 155 struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */
156#if IS_ENABLED(CONFIG_IPV6)
157 struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */
158#endif
156 struct net_device *dev; 159 struct net_device *dev;
157 struct net *net; /* netns for packet i/o */ 160 struct net *net; /* netns for packet i/o */
158 struct vxlan_rdst default_dst; /* default destination */ 161 struct vxlan_rdst default_dst; /* default destination */
@@ -195,9 +198,14 @@ struct vxlan_dev {
195struct net_device *vxlan_dev_create(struct net *net, const char *name, 198struct net_device *vxlan_dev_create(struct net *net, const char *name,
196 u8 name_assign_type, struct vxlan_config *conf); 199 u8 name_assign_type, struct vxlan_config *conf);
197 200
198static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan) 201static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan,
202 unsigned short family)
199{ 203{
200 return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport; 204#if IS_ENABLED(CONFIG_IPV6)
205 if (family == AF_INET6)
206 return inet_sk(vxlan->vn6_sock->sock->sk)->inet_sport;
207#endif
208 return inet_sk(vxlan->vn4_sock->sock->sk)->inet_sport;
201} 209}
202 210
203static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, 211static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
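
vxlan_dev_dst_port() now needs the remote address family so it can consult the matching per-family listening socket. A small usage sketch; the wrapper and its 'remote' argument are hypothetical:

static __be16 foo_pick_port(struct vxlan_dev *vxlan,
			    const union vxlan_addr *remote)
{
	/* AF_INET6 remotes use vn6_sock, everything else vn4_sock */
	return vxlan_dev_dst_port(vxlan, remote->sa.sa_family);
}
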
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 312e3fee9ccf..4a9c21f9b4ea 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -296,8 +296,6 @@ struct xfrm_policy_afinfo {
296 struct flowi *fl, 296 struct flowi *fl,
297 int reverse); 297 int reverse);
298 int (*get_tos)(const struct flowi *fl); 298 int (*get_tos)(const struct flowi *fl);
299 void (*init_dst)(struct net *net,
300 struct xfrm_dst *dst);
301 int (*init_path)(struct xfrm_dst *path, 299 int (*init_path)(struct xfrm_dst *path,
302 struct dst_entry *dst, 300 struct dst_entry *dst,
303 int nfheader_len); 301 int nfheader_len);
@@ -335,7 +333,7 @@ struct xfrm_state_afinfo {
335 const xfrm_address_t *saddr); 333 const xfrm_address_t *saddr);
336 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 334 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
337 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 335 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
338 int (*output)(struct sock *sk, struct sk_buff *skb); 336 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
339 int (*output_finish)(struct sock *sk, struct sk_buff *skb); 337 int (*output_finish)(struct sock *sk, struct sk_buff *skb);
340 int (*extract_input)(struct xfrm_state *x, 338 int (*extract_input)(struct xfrm_state *x,
341 struct sk_buff *skb); 339 struct sk_buff *skb);
@@ -1529,7 +1527,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1529 1527
1530int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1528int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1531int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1529int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1532int xfrm4_output(struct sock *sk, struct sk_buff *skb); 1530int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1533int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); 1531int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
1534int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); 1532int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1535int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); 1533int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1554,7 +1552,7 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1554__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); 1552__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1555int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1553int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1556int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1554int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1557int xfrm6_output(struct sock *sk, struct sk_buff *skb); 1555int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1558int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); 1556int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
1559int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1557int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1560 u8 **prevhdr); 1558 u8 **prevhdr);
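
The xfrm output hooks above gain an explicit struct net argument. A partial, illustrative afinfo initializer wiring the updated IPv4 prototypes; the _sketch name is hypothetical and the real table lives in net/ipv4/xfrm4_state.c:

static struct xfrm_state_afinfo xfrm4_state_afinfo_sketch = {
	.family		= AF_INET,
	.output		= xfrm4_output,		/* now (net, sk, skb) */
	.output_finish	= xfrm4_output_finish,
};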