aboutsummaryrefslogtreecommitdiffstats
path: root/net/netfilter
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2013-08-20 16:30:54 -0400
committerDavid S. Miller <davem@davemloft.net>2013-08-20 16:30:54 -0400
commit89d5e23210f53ab53b7ff64843bce62a106d454f (patch)
tree1be286a315bb017259da19eb1a5e92c5110dbc3b /net/netfilter
parent15ec80f5d4ad4d62ba3f19c90b3c995690b02103 (diff)
parent38c67328ac79cb9eaf61b5d4750fe3b9cff0dd15 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Conflicts: net/netfilter/nf_conntrack_proto_tcp.c The conflict had to do with overlapping changes dealing with fixing the use of an "s32" to hold the value returned by NAT_OFFSET(). Pablo Neira Ayuso says: ==================== The following batch contains Netfilter/IPVS updates for your net-next tree. More specifically, they are: * Trivial typo fix in xt_addrtype, from Phil Oester. * Remove net_ratelimit in the conntrack logging for consistency with other logging subsystem, from Patrick McHardy. * Remove unneeded includes from the recently added xt_connlabel support, from Florian Westphal. * Allow to update conntracks via nfqueue, don't need NFQA_CFG_F_CONNTRACK for this, from Florian Westphal. * Remove tproxy core, now that we have socket early demux, from Florian Westphal. * A couple of patches to refactor conntrack event reporting to save a good bunch of lines, from Florian Westphal. * Fix missing locking in NAT sequence adjustment, it did not manifest in any known bug so far, from Patrick McHardy. * Change sequence number adjustment variable to 32 bits, to delay the possible early overflow in long standing connections, also from Patrick. * Cosmetic cleanups for IPVS, from Dragos Foianu. * Fix possible null dereference in IPVS in the SH scheduler, from Daniel Borkmann. * Allow to attach conntrack expectations via nfqueue. Before this patch, you had to use ctnetlink instead, thus, we save the conntrack lookup. * Export xt_rpfilter and xt_HMARK header files, from Nicolas Dichtel. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netfilter')
-rw-r--r--net/netfilter/Kconfig22
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/core.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c69
-rw-r--r--net/netfilter/nf_conntrack_labels.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c269
-rw-r--r--net/netfilter/nf_conntrack_proto.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_nat_core.c6
-rw-r--r--net/netfilter/nf_nat_helper.c28
-rw-r--r--net/netfilter/nf_tproxy_core.c62
-rw-r--r--net/netfilter/nfnetlink_queue_core.c11
-rw-r--r--net/netfilter/nfnetlink_queue_ct.c15
-rw-r--r--net/netfilter/xt_TPROXY.c169
-rw-r--r--net/netfilter/xt_addrtype.c2
-rw-r--r--net/netfilter/xt_socket.c66
18 files changed, 484 insertions, 271 deletions
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56d22cae5906..c45fc1a60e0d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -410,20 +410,6 @@ config NF_NAT_TFTP
410 410
411endif # NF_CONNTRACK 411endif # NF_CONNTRACK
412 412
413# transparent proxy support
414config NETFILTER_TPROXY
415 tristate "Transparent proxying support"
416 depends on IP_NF_MANGLE
417 depends on NETFILTER_ADVANCED
418 help
419 This option enables transparent proxying support, that is,
420 support for handling non-locally bound IPv4 TCP and UDP sockets.
421 For it to work you will have to configure certain iptables rules
422 and use policy routing. For more information on how to set it up
423 see Documentation/networking/tproxy.txt.
424
425 To compile it as a module, choose M here. If unsure, say N.
426
427config NETFILTER_XTABLES 413config NETFILTER_XTABLES
428 tristate "Netfilter Xtables support (required for ip_tables)" 414 tristate "Netfilter Xtables support (required for ip_tables)"
429 default m if NETFILTER_ADVANCED=n 415 default m if NETFILTER_ADVANCED=n
@@ -720,10 +706,10 @@ config NETFILTER_XT_TARGET_TEE
720 this clone be rerouted to another nexthop. 706 this clone be rerouted to another nexthop.
721 707
722config NETFILTER_XT_TARGET_TPROXY 708config NETFILTER_XT_TARGET_TPROXY
723 tristate '"TPROXY" target support' 709 tristate '"TPROXY" target transparent proxying support'
724 depends on NETFILTER_TPROXY
725 depends on NETFILTER_XTABLES 710 depends on NETFILTER_XTABLES
726 depends on NETFILTER_ADVANCED 711 depends on NETFILTER_ADVANCED
712 depends on IP_NF_MANGLE
727 select NF_DEFRAG_IPV4 713 select NF_DEFRAG_IPV4
728 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 714 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
729 help 715 help
@@ -731,6 +717,9 @@ config NETFILTER_XT_TARGET_TPROXY
731 REDIRECT. It can only be used in the mangle table and is useful 717 REDIRECT. It can only be used in the mangle table and is useful
732 to redirect traffic to a transparent proxy. It does _not_ depend 718 to redirect traffic to a transparent proxy. It does _not_ depend
733 on Netfilter connection tracking and NAT, unlike REDIRECT. 719 on Netfilter connection tracking and NAT, unlike REDIRECT.
720 For it to work you will have to configure certain iptables rules
721 and use policy routing. For more information on how to set it up
722 see Documentation/networking/tproxy.txt.
734 723
735 To compile it as a module, choose M here. If unsure, say N. 724 To compile it as a module, choose M here. If unsure, say N.
736 725
@@ -1180,7 +1169,6 @@ config NETFILTER_XT_MATCH_SCTP
1180 1169
1181config NETFILTER_XT_MATCH_SOCKET 1170config NETFILTER_XT_MATCH_SOCKET
1182 tristate '"socket" match support' 1171 tristate '"socket" match support'
1183 depends on NETFILTER_TPROXY
1184 depends on NETFILTER_XTABLES 1172 depends on NETFILTER_XTABLES
1185 depends on NETFILTER_ADVANCED 1173 depends on NETFILTER_ADVANCED
1186 depends on !NF_CONNTRACK || NF_CONNTRACK 1174 depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a1abf87d43bf..ebfa7dc747cd 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -61,9 +61,6 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
61obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o 61obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
62obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o 62obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
63 63
64# transparent proxy support
65obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
66
67# generic X tables 64# generic X tables
68obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o 65obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
69 66
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 2217363ab422..593b16ea45e0 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable);
234/* This does not belong here, but locally generated errors need it if connection 234/* This does not belong here, but locally generated errors need it if connection
235 tracking in use: without this, connection may not be in hash table, and hence 235 tracking in use: without this, connection may not be in hash table, and hence
236 manufactured ICMP or RST packets will not be associated with it. */ 236 manufactured ICMP or RST packets will not be associated with it. */
237void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly; 237void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
238 __rcu __read_mostly;
238EXPORT_SYMBOL(ip_ct_attach); 239EXPORT_SYMBOL(ip_ct_attach);
239 240
240void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) 241void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
241{ 242{
242 void (*attach)(struct sk_buff *, struct sk_buff *); 243 void (*attach)(struct sk_buff *, const struct sk_buff *);
243 244
244 if (skb->nfct) { 245 if (skb->nfct) {
245 rcu_read_lock(); 246 rcu_read_lock();
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3cd85b2fc67c..5199448697f6 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
414 414
415 spin_lock_bh(&svc->sched_lock); 415 spin_lock_bh(&svc->sched_lock);
416 tbl->dead = 1; 416 tbl->dead = 1;
417 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 417 for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
418 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { 418 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
419 ip_vs_lblcr_free(en); 419 ip_vs_lblcr_free(en);
420 } 420 }
@@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
440 struct ip_vs_lblcr_entry *en; 440 struct ip_vs_lblcr_entry *en;
441 struct hlist_node *next; 441 struct hlist_node *next;
442 442
443 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 443 for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
444 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 444 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
445 445
446 spin_lock(&svc->sched_lock); 446 spin_lock(&svc->sched_lock);
@@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
495 if (goal > tbl->max_size/2) 495 if (goal > tbl->max_size/2)
496 goal = tbl->max_size/2; 496 goal = tbl->max_size/2;
497 497
498 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 498 for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
499 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 499 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
500 500
501 spin_lock(&svc->sched_lock); 501 spin_lock(&svc->sched_lock);
@@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
536 /* 536 /*
537 * Initialize the hash buckets 537 * Initialize the hash buckets
538 */ 538 */
539 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 539 for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
540 INIT_HLIST_HEAD(&tbl->bucket[i]); 540 INIT_HLIST_HEAD(&tbl->bucket[i]);
541 } 541 }
542 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; 542 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index f16c027df15b..3588faebe529 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
269 switch (iph->protocol) { 269 switch (iph->protocol) {
270 case IPPROTO_TCP: 270 case IPPROTO_TCP:
271 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); 271 th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
272 if (unlikely(th == NULL))
273 return 0;
272 port = th->source; 274 port = th->source;
273 break; 275 break;
274 case IPPROTO_UDP: 276 case IPPROTO_UDP:
275 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); 277 uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
278 if (unlikely(uh == NULL))
279 return 0;
276 port = uh->source; 280 port = uh->source;
277 break; 281 break;
278 case IPPROTO_SCTP: 282 case IPPROTO_SCTP:
279 sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); 283 sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
284 if (unlikely(sh == NULL))
285 return 0;
280 port = sh->source; 286 port = sh->source;
281 break; 287 break;
282 default: 288 default:
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0283baedcdfb..da6f1787a102 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -238,7 +238,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
238 nf_conntrack_free(ct); 238 nf_conntrack_free(ct);
239} 239}
240 240
241void nf_ct_delete_from_lists(struct nf_conn *ct) 241static void nf_ct_delete_from_lists(struct nf_conn *ct)
242{ 242{
243 struct net *net = nf_ct_net(ct); 243 struct net *net = nf_ct_net(ct);
244 244
@@ -253,7 +253,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
253 &net->ct.dying); 253 &net->ct.dying);
254 spin_unlock_bh(&nf_conntrack_lock); 254 spin_unlock_bh(&nf_conntrack_lock);
255} 255}
256EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
257 256
258static void death_by_event(unsigned long ul_conntrack) 257static void death_by_event(unsigned long ul_conntrack)
259{ 258{
@@ -275,7 +274,7 @@ static void death_by_event(unsigned long ul_conntrack)
275 nf_ct_put(ct); 274 nf_ct_put(ct);
276} 275}
277 276
278void nf_ct_dying_timeout(struct nf_conn *ct) 277static void nf_ct_dying_timeout(struct nf_conn *ct)
279{ 278{
280 struct net *net = nf_ct_net(ct); 279 struct net *net = nf_ct_net(ct);
281 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); 280 struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
@@ -288,27 +287,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct)
288 (prandom_u32() % net->ct.sysctl_events_retry_timeout); 287 (prandom_u32() % net->ct.sysctl_events_retry_timeout);
289 add_timer(&ecache->timeout); 288 add_timer(&ecache->timeout);
290} 289}
291EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
292 290
293static void death_by_timeout(unsigned long ul_conntrack) 291bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
294{ 292{
295 struct nf_conn *ct = (void *)ul_conntrack;
296 struct nf_conn_tstamp *tstamp; 293 struct nf_conn_tstamp *tstamp;
297 294
298 tstamp = nf_conn_tstamp_find(ct); 295 tstamp = nf_conn_tstamp_find(ct);
299 if (tstamp && tstamp->stop == 0) 296 if (tstamp && tstamp->stop == 0)
300 tstamp->stop = ktime_to_ns(ktime_get_real()); 297 tstamp->stop = ktime_to_ns(ktime_get_real());
301 298
302 if (!test_bit(IPS_DYING_BIT, &ct->status) && 299 if (!nf_ct_is_dying(ct) &&
303 unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) { 300 unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
301 portid, report) < 0)) {
304 /* destroy event was not delivered */ 302 /* destroy event was not delivered */
305 nf_ct_delete_from_lists(ct); 303 nf_ct_delete_from_lists(ct);
306 nf_ct_dying_timeout(ct); 304 nf_ct_dying_timeout(ct);
307 return; 305 return false;
308 } 306 }
309 set_bit(IPS_DYING_BIT, &ct->status); 307 set_bit(IPS_DYING_BIT, &ct->status);
310 nf_ct_delete_from_lists(ct); 308 nf_ct_delete_from_lists(ct);
311 nf_ct_put(ct); 309 nf_ct_put(ct);
310 return true;
311}
312EXPORT_SYMBOL_GPL(nf_ct_delete);
313
314static void death_by_timeout(unsigned long ul_conntrack)
315{
316 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
312} 317}
313 318
314/* 319/*
@@ -643,10 +648,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
643 return dropped; 648 return dropped;
644 649
645 if (del_timer(&ct->timeout)) { 650 if (del_timer(&ct->timeout)) {
646 death_by_timeout((unsigned long)ct); 651 if (nf_ct_delete(ct, 0, 0)) {
647 /* Check if we indeed killed this entry. Reliable event
648 delivery may have inserted it into the dying list. */
649 if (test_bit(IPS_DYING_BIT, &ct->status)) {
650 dropped = 1; 652 dropped = 1;
651 NF_CT_STAT_INC_ATOMIC(net, early_drop); 653 NF_CT_STAT_INC_ATOMIC(net, early_drop);
652 } 654 }
@@ -1192,7 +1194,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1192#endif 1194#endif
1193 1195
1194/* Used by ipt_REJECT and ip6t_REJECT. */ 1196/* Used by ipt_REJECT and ip6t_REJECT. */
1195static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) 1197static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1196{ 1198{
1197 struct nf_conn *ct; 1199 struct nf_conn *ct;
1198 enum ip_conntrack_info ctinfo; 1200 enum ip_conntrack_info ctinfo;
@@ -1244,7 +1246,7 @@ found:
1244 1246
1245void nf_ct_iterate_cleanup(struct net *net, 1247void nf_ct_iterate_cleanup(struct net *net,
1246 int (*iter)(struct nf_conn *i, void *data), 1248 int (*iter)(struct nf_conn *i, void *data),
1247 void *data) 1249 void *data, u32 portid, int report)
1248{ 1250{
1249 struct nf_conn *ct; 1251 struct nf_conn *ct;
1250 unsigned int bucket = 0; 1252 unsigned int bucket = 0;
@@ -1252,7 +1254,8 @@ void nf_ct_iterate_cleanup(struct net *net,
1252 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { 1254 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1253 /* Time to push up daises... */ 1255 /* Time to push up daises... */
1254 if (del_timer(&ct->timeout)) 1256 if (del_timer(&ct->timeout))
1255 death_by_timeout((unsigned long)ct); 1257 nf_ct_delete(ct, portid, report);
1258
1256 /* ... else the timer will get him soon. */ 1259 /* ... else the timer will get him soon. */
1257 1260
1258 nf_ct_put(ct); 1261 nf_ct_put(ct);
@@ -1260,30 +1263,6 @@ void nf_ct_iterate_cleanup(struct net *net,
1260} 1263}
1261EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); 1264EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1262 1265
1263struct __nf_ct_flush_report {
1264 u32 portid;
1265 int report;
1266};
1267
1268static int kill_report(struct nf_conn *i, void *data)
1269{
1270 struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1271 struct nf_conn_tstamp *tstamp;
1272
1273 tstamp = nf_conn_tstamp_find(i);
1274 if (tstamp && tstamp->stop == 0)
1275 tstamp->stop = ktime_to_ns(ktime_get_real());
1276
1277 /* If we fail to deliver the event, death_by_timeout() will retry */
1278 if (nf_conntrack_event_report(IPCT_DESTROY, i,
1279 fr->portid, fr->report) < 0)
1280 return 1;
1281
1282 /* Avoid the delivery of the destroy event in death_by_timeout(). */
1283 set_bit(IPS_DYING_BIT, &i->status);
1284 return 1;
1285}
1286
1287static int kill_all(struct nf_conn *i, void *data) 1266static int kill_all(struct nf_conn *i, void *data)
1288{ 1267{
1289 return 1; 1268 return 1;
@@ -1301,11 +1280,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1301 1280
1302void nf_conntrack_flush_report(struct net *net, u32 portid, int report) 1281void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
1303{ 1282{
1304 struct __nf_ct_flush_report fr = { 1283 nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
1305 .portid = portid,
1306 .report = report,
1307 };
1308 nf_ct_iterate_cleanup(net, kill_report, &fr);
1309} 1284}
1310EXPORT_SYMBOL_GPL(nf_conntrack_flush_report); 1285EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
1311 1286
@@ -1386,7 +1361,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1386i_see_dead_people: 1361i_see_dead_people:
1387 busy = 0; 1362 busy = 0;
1388 list_for_each_entry(net, net_exit_list, exit_list) { 1363 list_for_each_entry(net, net_exit_list, exit_list) {
1389 nf_ct_iterate_cleanup(net, kill_all, NULL); 1364 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1390 nf_ct_release_dying_list(net); 1365 nf_ct_release_dying_list(net);
1391 if (atomic_read(&net->ct.count) != 0) 1366 if (atomic_read(&net->ct.count) != 0)
1392 busy = 1; 1367 busy = 1;
@@ -1692,7 +1667,7 @@ err_stat:
1692 return ret; 1667 return ret;
1693} 1668}
1694 1669
1695s16 (*nf_ct_nat_offset)(const struct nf_conn *ct, 1670s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
1696 enum ip_conntrack_dir dir, 1671 enum ip_conntrack_dir dir,
1697 u32 seq); 1672 u32 seq);
1698EXPORT_SYMBOL_GPL(nf_ct_nat_offset); 1673EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 355d2ef08094..bb53f120e79c 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -8,12 +8,8 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/ctype.h>
12#include <linux/export.h> 11#include <linux/export.h>
13#include <linux/jhash.h>
14#include <linux/spinlock.h>
15#include <linux/types.h> 12#include <linux/types.h>
16#include <linux/slab.h>
17 13
18#include <net/netfilter/nf_conntrack_ecache.h> 14#include <net/netfilter/nf_conntrack_ecache.h>
19#include <net/netfilter/nf_conntrack_labels.h> 15#include <net/netfilter/nf_conntrack_labels.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index edc410e778f7..fa61fea63234 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1038,21 +1038,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
1038 } 1038 }
1039 } 1039 }
1040 1040
1041 if (del_timer(&ct->timeout)) { 1041 if (del_timer(&ct->timeout))
1042 if (nf_conntrack_event_report(IPCT_DESTROY, ct, 1042 nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1043 NETLINK_CB(skb).portid, 1043
1044 nlmsg_report(nlh)) < 0) {
1045 nf_ct_delete_from_lists(ct);
1046 /* we failed to report the event, try later */
1047 nf_ct_dying_timeout(ct);
1048 nf_ct_put(ct);
1049 return 0;
1050 }
1051 /* death_by_timeout would report the event again */
1052 set_bit(IPS_DYING_BIT, &ct->status);
1053 nf_ct_delete_from_lists(ct);
1054 nf_ct_put(ct);
1055 }
1056 nf_ct_put(ct); 1044 nf_ct_put(ct);
1057 1045
1058 return 0; 1046 return 0;
@@ -1999,6 +1987,27 @@ out:
1999 return err == -EAGAIN ? -ENOBUFS : err; 1987 return err == -EAGAIN ? -ENOBUFS : err;
2000} 1988}
2001 1989
1990static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1991 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1992 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1993 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1994 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1995 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1996 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
1997 .len = NF_CT_HELPER_NAME_LEN - 1 },
1998 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
1999 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2000 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2001 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2002 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2003};
2004
2005static struct nf_conntrack_expect *
2006ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2007 struct nf_conntrack_helper *helper,
2008 struct nf_conntrack_tuple *tuple,
2009 struct nf_conntrack_tuple *mask);
2010
2002#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT 2011#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2003static size_t 2012static size_t
2004ctnetlink_nfqueue_build_size(const struct nf_conn *ct) 2013ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
@@ -2139,10 +2148,69 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
2139 return ret; 2148 return ret;
2140} 2149}
2141 2150
2151static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
2152 const struct nf_conn *ct,
2153 struct nf_conntrack_tuple *tuple,
2154 struct nf_conntrack_tuple *mask)
2155{
2156 int err;
2157
2158 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2159 nf_ct_l3num(ct));
2160 if (err < 0)
2161 return err;
2162
2163 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2164 nf_ct_l3num(ct));
2165}
2166
2167static int
2168ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2169 u32 portid, u32 report)
2170{
2171 struct nlattr *cda[CTA_EXPECT_MAX+1];
2172 struct nf_conntrack_tuple tuple, mask;
2173 struct nf_conntrack_helper *helper;
2174 struct nf_conntrack_expect *exp;
2175 int err;
2176
2177 err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
2178 if (err < 0)
2179 return err;
2180
2181 err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
2182 ct, &tuple, &mask);
2183 if (err < 0)
2184 return err;
2185
2186 if (cda[CTA_EXPECT_HELP_NAME]) {
2187 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2188
2189 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2190 nf_ct_protonum(ct));
2191 if (helper == NULL)
2192 return -EOPNOTSUPP;
2193 }
2194
2195 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2196 helper, &tuple, &mask);
2197 if (IS_ERR(exp))
2198 return PTR_ERR(exp);
2199
2200 err = nf_ct_expect_related_report(exp, portid, report);
2201 if (err < 0) {
2202 nf_ct_expect_put(exp);
2203 return err;
2204 }
2205
2206 return 0;
2207}
2208
2142static struct nfq_ct_hook ctnetlink_nfqueue_hook = { 2209static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
2143 .build_size = ctnetlink_nfqueue_build_size, 2210 .build_size = ctnetlink_nfqueue_build_size,
2144 .build = ctnetlink_nfqueue_build, 2211 .build = ctnetlink_nfqueue_build,
2145 .parse = ctnetlink_nfqueue_parse, 2212 .parse = ctnetlink_nfqueue_parse,
2213 .attach_expect = ctnetlink_nfqueue_attach_expect,
2146}; 2214};
2147#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */ 2215#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2148 2216
@@ -2510,21 +2578,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2510 return err; 2578 return err;
2511} 2579}
2512 2580
2513static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2514 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2515 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2516 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2517 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2518 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2519 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2520 .len = NF_CT_HELPER_NAME_LEN - 1 },
2521 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2522 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2523 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2524 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2525 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2526};
2527
2528static int 2581static int
2529ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, 2582ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2530 const struct nlmsghdr *nlh, 2583 const struct nlmsghdr *nlh,
@@ -2747,76 +2800,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
2747#endif 2800#endif
2748} 2801}
2749 2802
2750static int 2803static struct nf_conntrack_expect *
2751ctnetlink_create_expect(struct net *net, u16 zone, 2804ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
2752 const struct nlattr * const cda[], 2805 struct nf_conntrack_helper *helper,
2753 u_int8_t u3, 2806 struct nf_conntrack_tuple *tuple,
2754 u32 portid, int report) 2807 struct nf_conntrack_tuple *mask)
2755{ 2808{
2756 struct nf_conntrack_tuple tuple, mask, master_tuple; 2809 u_int32_t class = 0;
2757 struct nf_conntrack_tuple_hash *h = NULL;
2758 struct nf_conntrack_expect *exp; 2810 struct nf_conntrack_expect *exp;
2759 struct nf_conn *ct;
2760 struct nf_conn_help *help; 2811 struct nf_conn_help *help;
2761 struct nf_conntrack_helper *helper = NULL; 2812 int err;
2762 u_int32_t class = 0;
2763 int err = 0;
2764
2765 /* caller guarantees that those three CTA_EXPECT_* exist */
2766 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2767 if (err < 0)
2768 return err;
2769 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2770 if (err < 0)
2771 return err;
2772 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2773 if (err < 0)
2774 return err;
2775
2776 /* Look for master conntrack of this expectation */
2777 h = nf_conntrack_find_get(net, zone, &master_tuple);
2778 if (!h)
2779 return -ENOENT;
2780 ct = nf_ct_tuplehash_to_ctrack(h);
2781
2782 /* Look for helper of this expectation */
2783 if (cda[CTA_EXPECT_HELP_NAME]) {
2784 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2785
2786 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2787 nf_ct_protonum(ct));
2788 if (helper == NULL) {
2789#ifdef CONFIG_MODULES
2790 if (request_module("nfct-helper-%s", helpname) < 0) {
2791 err = -EOPNOTSUPP;
2792 goto out;
2793 }
2794
2795 helper = __nf_conntrack_helper_find(helpname,
2796 nf_ct_l3num(ct),
2797 nf_ct_protonum(ct));
2798 if (helper) {
2799 err = -EAGAIN;
2800 goto out;
2801 }
2802#endif
2803 err = -EOPNOTSUPP;
2804 goto out;
2805 }
2806 }
2807 2813
2808 if (cda[CTA_EXPECT_CLASS] && helper) { 2814 if (cda[CTA_EXPECT_CLASS] && helper) {
2809 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); 2815 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2810 if (class > helper->expect_class_max) { 2816 if (class > helper->expect_class_max)
2811 err = -EINVAL; 2817 return ERR_PTR(-EINVAL);
2812 goto out;
2813 }
2814 } 2818 }
2815 exp = nf_ct_expect_alloc(ct); 2819 exp = nf_ct_expect_alloc(ct);
2816 if (!exp) { 2820 if (!exp)
2817 err = -ENOMEM; 2821 return ERR_PTR(-ENOMEM);
2818 goto out; 2822
2819 }
2820 help = nfct_help(ct); 2823 help = nfct_help(ct);
2821 if (!help) { 2824 if (!help) {
2822 if (!cda[CTA_EXPECT_TIMEOUT]) { 2825 if (!cda[CTA_EXPECT_TIMEOUT]) {
@@ -2854,21 +2857,89 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2854 exp->class = class; 2857 exp->class = class;
2855 exp->master = ct; 2858 exp->master = ct;
2856 exp->helper = helper; 2859 exp->helper = helper;
2857 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); 2860 exp->tuple = *tuple;
2858 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3)); 2861 exp->mask.src.u3 = mask->src.u3;
2859 exp->mask.src.u.all = mask.src.u.all; 2862 exp->mask.src.u.all = mask->src.u.all;
2860 2863
2861 if (cda[CTA_EXPECT_NAT]) { 2864 if (cda[CTA_EXPECT_NAT]) {
2862 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], 2865 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2863 exp, u3); 2866 exp, nf_ct_l3num(ct));
2864 if (err < 0) 2867 if (err < 0)
2865 goto err_out; 2868 goto err_out;
2866 } 2869 }
2867 err = nf_ct_expect_related_report(exp, portid, report); 2870 return exp;
2868err_out: 2871err_out:
2869 nf_ct_expect_put(exp); 2872 nf_ct_expect_put(exp);
2870out: 2873 return ERR_PTR(err);
2871 nf_ct_put(nf_ct_tuplehash_to_ctrack(h)); 2874}
2875
2876static int
2877ctnetlink_create_expect(struct net *net, u16 zone,
2878 const struct nlattr * const cda[],
2879 u_int8_t u3, u32 portid, int report)
2880{
2881 struct nf_conntrack_tuple tuple, mask, master_tuple;
2882 struct nf_conntrack_tuple_hash *h = NULL;
2883 struct nf_conntrack_helper *helper = NULL;
2884 struct nf_conntrack_expect *exp;
2885 struct nf_conn *ct;
2886 int err;
2887
2888 /* caller guarantees that those three CTA_EXPECT_* exist */
2889 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2890 if (err < 0)
2891 return err;
2892 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2893 if (err < 0)
2894 return err;
2895 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2896 if (err < 0)
2897 return err;
2898
2899 /* Look for master conntrack of this expectation */
2900 h = nf_conntrack_find_get(net, zone, &master_tuple);
2901 if (!h)
2902 return -ENOENT;
2903 ct = nf_ct_tuplehash_to_ctrack(h);
2904
2905 if (cda[CTA_EXPECT_HELP_NAME]) {
2906 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2907
2908 helper = __nf_conntrack_helper_find(helpname, u3,
2909 nf_ct_protonum(ct));
2910 if (helper == NULL) {
2911#ifdef CONFIG_MODULES
2912 if (request_module("nfct-helper-%s", helpname) < 0) {
2913 err = -EOPNOTSUPP;
2914 goto err_ct;
2915 }
2916 helper = __nf_conntrack_helper_find(helpname, u3,
2917 nf_ct_protonum(ct));
2918 if (helper) {
2919 err = -EAGAIN;
2920 goto err_ct;
2921 }
2922#endif
2923 err = -EOPNOTSUPP;
2924 goto err_ct;
2925 }
2926 }
2927
2928 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
2929 if (IS_ERR(exp)) {
2930 err = PTR_ERR(exp);
2931 goto err_ct;
2932 }
2933
2934 err = nf_ct_expect_related_report(exp, portid, report);
2935 if (err < 0)
2936 goto err_exp;
2937
2938 return 0;
2939err_exp:
2940 nf_ct_expect_put(exp);
2941err_ct:
2942 nf_ct_put(ct);
2872 return err; 2943 return err;
2873} 2944}
2874 2945
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0ab9636ac57e..ce3004156eeb 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
281 nf_ct_l3proto_unregister_sysctl(net, proto); 281 nf_ct_l3proto_unregister_sysctl(net, proto);
282 282
283 /* Remove all contrack entries for this protocol */ 283 /* Remove all contrack entries for this protocol */
284 nf_ct_iterate_cleanup(net, kill_l3proto, proto); 284 nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
285} 285}
286EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister); 286EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
287 287
@@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
476 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto); 476 nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
477 477
478 /* Remove all contrack entries for this protocol */ 478 /* Remove all contrack entries for this protocol */
479 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); 479 nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
480} 480}
481EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister); 481EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
482 482
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 2f8010707d01..d224e001f14f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -496,7 +496,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
496} 496}
497 497
498#ifdef CONFIG_NF_NAT_NEEDED 498#ifdef CONFIG_NF_NAT_NEEDED
499static inline s16 nat_offset(const struct nf_conn *ct, 499static inline s32 nat_offset(const struct nf_conn *ct,
500 enum ip_conntrack_dir dir, 500 enum ip_conntrack_dir dir,
501 u32 seq) 501 u32 seq)
502{ 502{
@@ -525,7 +525,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
525 struct ip_ct_tcp_state *receiver = &state->seen[!dir]; 525 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 526 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
527 __u32 seq, ack, sack, end, win, swin; 527 __u32 seq, ack, sack, end, win, swin;
528 s16 receiver_offset; 528 s32 receiver_offset;
529 bool res, in_recv_win; 529 bool res, in_recv_win;
530 530
531 /* 531 /*
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 038eee5c8f85..6ff808375b5e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -497,7 +497,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
497 497
498 rtnl_lock(); 498 rtnl_lock();
499 for_each_net(net) 499 for_each_net(net)
500 nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); 500 nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
501 rtnl_unlock(); 501 rtnl_unlock();
502} 502}
503 503
@@ -511,7 +511,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
511 rtnl_lock(); 511 rtnl_lock();
512 512
513 for_each_net(net) 513 for_each_net(net)
514 nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); 514 nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
515 rtnl_unlock(); 515 rtnl_unlock();
516} 516}
517 517
@@ -749,7 +749,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
749{ 749{
750 struct nf_nat_proto_clean clean = {}; 750 struct nf_nat_proto_clean clean = {};
751 751
752 nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); 752 nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
753 synchronize_rcu(); 753 synchronize_rcu();
754 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); 754 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
755} 755}
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 85e20a919081..46b9baa845a6 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -30,8 +30,6 @@
30 pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \ 30 pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
31 x->offset_before, x->offset_after, x->correction_pos); 31 x->offset_before, x->offset_after, x->correction_pos);
32 32
33static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
34
35/* Setup TCP sequence correction given this change at this sequence */ 33/* Setup TCP sequence correction given this change at this sequence */
36static inline void 34static inline void
37adjust_tcp_sequence(u32 seq, 35adjust_tcp_sequence(u32 seq,
@@ -49,7 +47,7 @@ adjust_tcp_sequence(u32 seq,
49 pr_debug("adjust_tcp_sequence: Seq_offset before: "); 47 pr_debug("adjust_tcp_sequence: Seq_offset before: ");
50 DUMP_OFFSET(this_way); 48 DUMP_OFFSET(this_way);
51 49
52 spin_lock_bh(&nf_nat_seqofs_lock); 50 spin_lock_bh(&ct->lock);
53 51
54 /* SYN adjust. If it's uninitialized, or this is after last 52 /* SYN adjust. If it's uninitialized, or this is after last
55 * correction, record it: we don't handle more than one 53 * correction, record it: we don't handle more than one
@@ -61,31 +59,26 @@ adjust_tcp_sequence(u32 seq,
61 this_way->offset_before = this_way->offset_after; 59 this_way->offset_before = this_way->offset_after;
62 this_way->offset_after += sizediff; 60 this_way->offset_after += sizediff;
63 } 61 }
64 spin_unlock_bh(&nf_nat_seqofs_lock); 62 spin_unlock_bh(&ct->lock);
65 63
66 pr_debug("adjust_tcp_sequence: Seq_offset after: "); 64 pr_debug("adjust_tcp_sequence: Seq_offset after: ");
67 DUMP_OFFSET(this_way); 65 DUMP_OFFSET(this_way);
68} 66}
69 67
70/* Get the offset value, for conntrack */ 68/* Get the offset value, for conntrack. Caller must have the conntrack locked */
71s16 nf_nat_get_offset(const struct nf_conn *ct, 69s32 nf_nat_get_offset(const struct nf_conn *ct,
72 enum ip_conntrack_dir dir, 70 enum ip_conntrack_dir dir,
73 u32 seq) 71 u32 seq)
74{ 72{
75 struct nf_conn_nat *nat = nfct_nat(ct); 73 struct nf_conn_nat *nat = nfct_nat(ct);
76 struct nf_nat_seq *this_way; 74 struct nf_nat_seq *this_way;
77 s16 offset;
78 75
79 if (!nat) 76 if (!nat)
80 return 0; 77 return 0;
81 78
82 this_way = &nat->seq[dir]; 79 this_way = &nat->seq[dir];
83 spin_lock_bh(&nf_nat_seqofs_lock); 80 return after(seq, this_way->correction_pos)
84 offset = after(seq, this_way->correction_pos)
85 ? this_way->offset_after : this_way->offset_before; 81 ? this_way->offset_after : this_way->offset_before;
86 spin_unlock_bh(&nf_nat_seqofs_lock);
87
88 return offset;
89} 82}
90 83
91/* Frobs data inside this packet, which is linear. */ 84/* Frobs data inside this packet, which is linear. */
@@ -143,7 +136,7 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
143} 136}
144 137
145void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo, 138void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
146 __be32 seq, s16 off) 139 __be32 seq, s32 off)
147{ 140{
148 if (!off) 141 if (!off)
149 return; 142 return;
@@ -370,9 +363,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
370 struct tcphdr *tcph; 363 struct tcphdr *tcph;
371 int dir; 364 int dir;
372 __be32 newseq, newack; 365 __be32 newseq, newack;
373 s16 seqoff, ackoff; 366 s32 seqoff, ackoff;
374 struct nf_conn_nat *nat = nfct_nat(ct); 367 struct nf_conn_nat *nat = nfct_nat(ct);
375 struct nf_nat_seq *this_way, *other_way; 368 struct nf_nat_seq *this_way, *other_way;
369 int res;
376 370
377 dir = CTINFO2DIR(ctinfo); 371 dir = CTINFO2DIR(ctinfo);
378 372
@@ -383,6 +377,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
383 return 0; 377 return 0;
384 378
385 tcph = (void *)skb->data + protoff; 379 tcph = (void *)skb->data + protoff;
380 spin_lock_bh(&ct->lock);
386 if (after(ntohl(tcph->seq), this_way->correction_pos)) 381 if (after(ntohl(tcph->seq), this_way->correction_pos))
387 seqoff = this_way->offset_after; 382 seqoff = this_way->offset_after;
388 else 383 else
@@ -407,7 +402,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
407 tcph->seq = newseq; 402 tcph->seq = newseq;
408 tcph->ack_seq = newack; 403 tcph->ack_seq = newack;
409 404
410 return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo); 405 res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
406 spin_unlock_bh(&ct->lock);
407
408 return res;
411} 409}
412 410
413/* Setup NAT on this expected conntrack so it follows master. */ 411/* Setup NAT on this expected conntrack so it follows master. */
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
deleted file mode 100644
index 474d621cbc2e..000000000000
--- a/net/netfilter/nf_tproxy_core.c
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Transparent proxy support for Linux/iptables
3 *
4 * Copyright (c) 2006-2007 BalaBit IT Ltd.
5 * Author: Balazs Scheidler, Krisztian Kovacs
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14
15#include <linux/net.h>
16#include <linux/if.h>
17#include <linux/netdevice.h>
18#include <net/udp.h>
19#include <net/netfilter/nf_tproxy_core.h>
20
21
22static void
23nf_tproxy_destructor(struct sk_buff *skb)
24{
25 struct sock *sk = skb->sk;
26
27 skb->sk = NULL;
28 skb->destructor = NULL;
29
30 if (sk)
31 sock_put(sk);
32}
33
34/* consumes sk */
35void
36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
37{
38 /* assigning tw sockets complicates things; most
39 * skb->sk->X checks would have to test sk->sk_state first */
40 if (sk->sk_state == TCP_TIME_WAIT) {
41 inet_twsk_put(inet_twsk(sk));
42 return;
43 }
44
45 skb_orphan(skb);
46 skb->sk = sk;
47 skb->destructor = nf_tproxy_destructor;
48}
49EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
50
51static int __init nf_tproxy_init(void)
52{
53 pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
54 pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
55 return 0;
56}
57
58module_init(nf_tproxy_init);
59
60MODULE_LICENSE("GPL");
61MODULE_AUTHOR("Krisztian Kovacs");
62MODULE_DESCRIPTION("Transparent proxy support core routines");
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 8a703c3dd318..95a98c8c1da6 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -862,6 +862,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
862 [NFQA_MARK] = { .type = NLA_U32 }, 862 [NFQA_MARK] = { .type = NLA_U32 },
863 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, 863 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
864 [NFQA_CT] = { .type = NLA_UNSPEC }, 864 [NFQA_CT] = { .type = NLA_UNSPEC },
865 [NFQA_EXP] = { .type = NLA_UNSPEC },
865}; 866};
866 867
867static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { 868static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -990,9 +991,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
990 if (entry == NULL) 991 if (entry == NULL)
991 return -ENOENT; 992 return -ENOENT;
992 993
993 rcu_read_lock(); 994 if (nfqa[NFQA_CT]) {
994 if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
995 ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); 995 ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
996 if (ct && nfqa[NFQA_EXP]) {
997 nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
998 NETLINK_CB(skb).portid,
999 nlmsg_report(nlh));
1000 }
1001 }
996 1002
997 if (nfqa[NFQA_PAYLOAD]) { 1003 if (nfqa[NFQA_PAYLOAD]) {
998 u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]); 1004 u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -1005,7 +1011,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
1005 if (ct) 1011 if (ct)
1006 nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff); 1012 nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
1007 } 1013 }
1008 rcu_read_unlock();
1009 1014
1010 if (nfqa[NFQA_MARK]) 1015 if (nfqa[NFQA_MARK])
1011 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); 1016 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
index ab61d66bc0b9..be893039966d 100644
--- a/net/netfilter/nfnetlink_queue_ct.c
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -96,3 +96,18 @@ void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
96 if ((ct->status & IPS_NAT_MASK) && diff) 96 if ((ct->status & IPS_NAT_MASK) && diff)
97 nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff); 97 nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
98} 98}
99
100int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
101 u32 portid, u32 report)
102{
103 struct nfq_ct_hook *nfq_ct;
104
105 if (nf_ct_is_untracked(ct))
106 return 0;
107
108 nfq_ct = rcu_dereference(nfq_ct_hook);
109 if (nfq_ct == NULL)
110 return -EOPNOTSUPP;
111
112 return nfq_ct->attach_expect(attr, ct, portid, report);
113}
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d7f195388f66..5d8a3a3cd5a7 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -15,7 +15,9 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <net/checksum.h> 16#include <net/checksum.h>
17#include <net/udp.h> 17#include <net/udp.h>
18#include <net/tcp.h>
18#include <net/inet_sock.h> 19#include <net/inet_sock.h>
20#include <net/inet_hashtables.h>
19#include <linux/inetdevice.h> 21#include <linux/inetdevice.h>
20#include <linux/netfilter/x_tables.h> 22#include <linux/netfilter/x_tables.h>
21#include <linux/netfilter_ipv4/ip_tables.h> 23#include <linux/netfilter_ipv4/ip_tables.h>
@@ -26,13 +28,18 @@
26#define XT_TPROXY_HAVE_IPV6 1 28#define XT_TPROXY_HAVE_IPV6 1
27#include <net/if_inet6.h> 29#include <net/if_inet6.h>
28#include <net/addrconf.h> 30#include <net/addrconf.h>
31#include <net/inet6_hashtables.h>
29#include <linux/netfilter_ipv6/ip6_tables.h> 32#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <net/netfilter/ipv6/nf_defrag_ipv6.h> 33#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
31#endif 34#endif
32 35
33#include <net/netfilter/nf_tproxy_core.h>
34#include <linux/netfilter/xt_TPROXY.h> 36#include <linux/netfilter/xt_TPROXY.h>
35 37
38enum nf_tproxy_lookup_t {
39 NFT_LOOKUP_LISTENER,
40 NFT_LOOKUP_ESTABLISHED,
41};
42
36static bool tproxy_sk_is_transparent(struct sock *sk) 43static bool tproxy_sk_is_transparent(struct sock *sk)
37{ 44{
38 if (sk->sk_state != TCP_TIME_WAIT) { 45 if (sk->sk_state != TCP_TIME_WAIT) {
@@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
68 return laddr ? laddr : daddr; 75 return laddr ? laddr : daddr;
69} 76}
70 77
78/*
79 * This is used when the user wants to intercept a connection matching
80 * an explicit iptables rule. In this case the sockets are assumed
81 * matching in preference order:
82 *
83 * - match: if there's a fully established connection matching the
84 * _packet_ tuple, it is returned, assuming the redirection
85 * already took place and we process a packet belonging to an
86 * established connection
87 *
88 * - match: if there's a listening socket matching the redirection
89 * (e.g. on-port & on-ip of the connection), it is returned,
90 * regardless if it was bound to 0.0.0.0 or an explicit
91 * address. The reasoning is that if there's an explicit rule, it
92 * does not really matter if the listener is bound to an interface
93 * or to 0. The user already stated that he wants redirection
94 * (since he added the rule).
95 *
96 * Please note that there's an overlap between what a TPROXY target
97 * and a socket match will match. Normally if you have both rules the
98 * "socket" match will be the first one, effectively all packets
99 * belonging to established connections going through that one.
100 */
101static inline struct sock *
102nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
103 const __be32 saddr, const __be32 daddr,
104 const __be16 sport, const __be16 dport,
105 const struct net_device *in,
106 const enum nf_tproxy_lookup_t lookup_type)
107{
108 struct sock *sk;
109
110 switch (protocol) {
111 case IPPROTO_TCP:
112 switch (lookup_type) {
113 case NFT_LOOKUP_LISTENER:
114 sk = inet_lookup_listener(net, &tcp_hashinfo,
115 saddr, sport,
116 daddr, dport,
117 in->ifindex);
118
119 /* NOTE: we return listeners even if bound to
120 * 0.0.0.0, those are filtered out in
121 * xt_socket, since xt_TPROXY needs 0 bound
122 * listeners too
123 */
124 break;
125 case NFT_LOOKUP_ESTABLISHED:
126 sk = inet_lookup_established(net, &tcp_hashinfo,
127 saddr, sport, daddr, dport,
128 in->ifindex);
129 break;
130 default:
131 BUG();
132 }
133 break;
134 case IPPROTO_UDP:
135 sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
136 in->ifindex);
137 if (sk) {
138 int connected = (sk->sk_state == TCP_ESTABLISHED);
139 int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
140
141 /* NOTE: we return listeners even if bound to
142 * 0.0.0.0, those are filtered out in
143 * xt_socket, since xt_TPROXY needs 0 bound
144 * listeners too
145 */
146 if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
147 (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
148 sock_put(sk);
149 sk = NULL;
150 }
151 }
152 break;
153 default:
154 WARN_ON(1);
155 sk = NULL;
156 }
157
158 pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
159 protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
160
161 return sk;
162}
163
164#ifdef XT_TPROXY_HAVE_IPV6
165static inline struct sock *
166nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
167 const struct in6_addr *saddr, const struct in6_addr *daddr,
168 const __be16 sport, const __be16 dport,
169 const struct net_device *in,
170 const enum nf_tproxy_lookup_t lookup_type)
171{
172 struct sock *sk;
173
174 switch (protocol) {
175 case IPPROTO_TCP:
176 switch (lookup_type) {
177 case NFT_LOOKUP_LISTENER:
178 sk = inet6_lookup_listener(net, &tcp_hashinfo,
179 saddr, sport,
180 daddr, ntohs(dport),
181 in->ifindex);
182
183 /* NOTE: we return listeners even if bound to
184 * 0.0.0.0, those are filtered out in
185 * xt_socket, since xt_TPROXY needs 0 bound
186 * listeners too
187 */
188 break;
189 case NFT_LOOKUP_ESTABLISHED:
190 sk = __inet6_lookup_established(net, &tcp_hashinfo,
191 saddr, sport, daddr, ntohs(dport),
192 in->ifindex);
193 break;
194 default:
195 BUG();
196 }
197 break;
198 case IPPROTO_UDP:
199 sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
200 in->ifindex);
201 if (sk) {
202 int connected = (sk->sk_state == TCP_ESTABLISHED);
203 int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
204
205 /* NOTE: we return listeners even if bound to
206 * 0.0.0.0, those are filtered out in
207 * xt_socket, since xt_TPROXY needs 0 bound
208 * listeners too
209 */
210 if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
211 (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
212 sock_put(sk);
213 sk = NULL;
214 }
215 }
216 break;
217 default:
218 WARN_ON(1);
219 sk = NULL;
220 }
221
222 pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
223 protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
224
225 return sk;
226}
227#endif
228
71/** 229/**
72 * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections 230 * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
73 * @skb: The skb being processed. 231 * @skb: The skb being processed.
@@ -117,6 +275,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
117 return sk; 275 return sk;
118} 276}
119 277
278/* assign a socket to the skb -- consumes sk */
279static void
280nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
281{
282 skb_orphan(skb);
283 skb->sk = sk;
284 skb->destructor = sock_edemux;
285}
286
120static unsigned int 287static unsigned int
121tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, 288tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
122 u_int32_t mark_mask, u_int32_t mark_value) 289 u_int32_t mark_mask, u_int32_t mark_value)
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 68ff29f60867..fab6eea1bf38 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) { 204 if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
205 pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n"); 205 pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
206 return -EINVAL; 206 return -EINVAL;
207 } 207 }
208 if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) { 208 if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 20b15916f403..06df2b9110f5 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -19,12 +19,12 @@
19#include <net/icmp.h> 19#include <net/icmp.h>
20#include <net/sock.h> 20#include <net/sock.h>
21#include <net/inet_sock.h> 21#include <net/inet_sock.h>
22#include <net/netfilter/nf_tproxy_core.h>
23#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 22#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
24 23
25#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 24#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
26#define XT_SOCKET_HAVE_IPV6 1 25#define XT_SOCKET_HAVE_IPV6 1
27#include <linux/netfilter_ipv6/ip6_tables.h> 26#include <linux/netfilter_ipv6/ip6_tables.h>
27#include <net/inet6_hashtables.h>
28#include <net/netfilter/ipv6/nf_defrag_ipv6.h> 28#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
29#endif 29#endif
30 30
@@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb,
101 return 0; 101 return 0;
102} 102}
103 103
104/* "socket" match based redirection (no specific rule)
105 * ===================================================
106 *
107 * There are connections with dynamic endpoints (e.g. FTP data
108 * connection) that the user is unable to add explicit rules
109 * for. These are taken care of by a generic "socket" rule. It is
110 * assumed that the proxy application is trusted to open such
111 * connections without explicit iptables rule (except of course the
112 * generic 'socket' rule). In this case the following sockets are
113 * matched in preference order:
114 *
115 * - match: if there's a fully established connection matching the
116 * _packet_ tuple
117 *
118 * - match: if there's a non-zero bound listener (possibly with a
119 * non-local address) We don't accept zero-bound listeners, since
120 * then local services could intercept traffic going through the
121 * box.
122 */
123static struct sock *
124xt_socket_get_sock_v4(struct net *net, const u8 protocol,
125 const __be32 saddr, const __be32 daddr,
126 const __be16 sport, const __be16 dport,
127 const struct net_device *in)
128{
129 switch (protocol) {
130 case IPPROTO_TCP:
131 return __inet_lookup(net, &tcp_hashinfo,
132 saddr, sport, daddr, dport,
133 in->ifindex);
134 case IPPROTO_UDP:
135 return udp4_lib_lookup(net, saddr, sport, daddr, dport,
136 in->ifindex);
137 }
138 return NULL;
139}
140
104static bool 141static bool
105socket_match(const struct sk_buff *skb, struct xt_action_param *par, 142socket_match(const struct sk_buff *skb, struct xt_action_param *par,
106 const struct xt_socket_mtinfo1 *info) 143 const struct xt_socket_mtinfo1 *info)
@@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
156#endif 193#endif
157 194
158 if (!sk) 195 if (!sk)
159 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, 196 sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
160 saddr, daddr, sport, dport, 197 saddr, daddr, sport, dport,
161 par->in, NFT_LOOKUP_ANY); 198 par->in);
162 if (sk) { 199 if (sk) {
163 bool wildcard; 200 bool wildcard;
164 bool transparent = true; 201 bool transparent = true;
@@ -265,6 +302,25 @@ extract_icmp6_fields(const struct sk_buff *skb,
265 return 0; 302 return 0;
266} 303}
267 304
305static struct sock *
306xt_socket_get_sock_v6(struct net *net, const u8 protocol,
307 const struct in6_addr *saddr, const struct in6_addr *daddr,
308 const __be16 sport, const __be16 dport,
309 const struct net_device *in)
310{
311 switch (protocol) {
312 case IPPROTO_TCP:
313 return inet6_lookup(net, &tcp_hashinfo,
314 saddr, sport, daddr, dport,
315 in->ifindex);
316 case IPPROTO_UDP:
317 return udp6_lib_lookup(net, saddr, sport, daddr, dport,
318 in->ifindex);
319 }
320
321 return NULL;
322}
323
268static bool 324static bool
269socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) 325socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
270{ 326{
@@ -302,9 +358,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
302 } 358 }
303 359
304 if (!sk) 360 if (!sk)
305 sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, 361 sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
306 saddr, daddr, sport, dport, 362 saddr, daddr, sport, dport,
307 par->in, NFT_LOOKUP_ANY); 363 par->in);
308 if (sk) { 364 if (sk) {
309 bool wildcard; 365 bool wildcard;
310 bool transparent = true; 366 bool transparent = true;