author	David S. Miller <davem@davemloft.net>	2016-04-12 22:34:56 -0400
committer	David S. Miller <davem@davemloft.net>	2016-04-12 22:34:56 -0400
commit	da0caadf0a05945bf2ef017d43e4eae1e2859b92 (patch)
tree	9c229267ca7369672add23ebffdf01a46c6aa339
parent	69fb78121bf86c87490ea1453e310a39c320a70a (diff)
parent	ecdfb48cddfd1096343148113d5b1bd789033aa8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains the first batch of Netfilter updates for
your net-next tree.

1) Define pr_fmt() in nf_conntrack, from Weongyo Jeong.

2) Define and register netfilter's afinfo for the bridge family, this comes
   in preparation for native nfqueue's bridge for nft, from Stephane Bryant.

3) Add new attributes to store layer 2 and VLAN headers to nfqueue, also
   from Stephane Bryant.

4) Parse new NFQA_VLAN and NFQA_L2HDR nfqueue netlink attributes coming
   from userspace, from Stephane Bryant.

5) Use net->ipv6.devconf_all->hop_limit instead of hardcoded hop_limit in
   IPv6 SYNPROXY, from Liping Zhang.

6) Remove unnecessary check for dst == NULL in nf_reject_ipv6, from
   Haishuang Yan.

7) Deinline ctnetlink event report functions, from Florian Westphal.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
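For items 3 and 4, a minimal userspace sketch of handing the VLAN metadata and the saved layer 2 header back to the kernel on the verdict message. Assumptions: libmnl is used for attribute packing, the nlmsghdr already carries the nfgenmsg and NFQA_VERDICT_HDR parts (setup elided), and the helper name is made up for illustration; this is not part of the patch itself.

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_queue.h>

/* Hypothetical helper: "nlh" already contains the nfgenmsg and the
 * NFQA_VERDICT_HDR attribute of an NFQNL_MSG_VERDICT message.
 * Both VLAN fields travel as __be16, mirroring nfqa_parse_bridge().
 */
static void verdict_put_bridge(struct nlmsghdr *nlh, uint16_t vlan_proto,
			       uint16_t vlan_tci,
			       const void *l2hdr, size_t l2len)
{
	struct nlattr *nest;

	/* Nested NFQA_VLAN carrying the protocol and the TCI. */
	nest = mnl_attr_nest_start(nlh, NFQA_VLAN);
	mnl_attr_put_u16(nlh, NFQA_VLAN_PROTO, htons(vlan_proto));
	mnl_attr_put_u16(nlh, NFQA_VLAN_TCI, htons(vlan_tci));
	mnl_attr_nest_end(nlh, nest);

	/* Full layer 2 header; the kernel only accepts it when the length
	 * matches the queued skb's mac-to-network header gap.
	 */
	if (l2len > 0)
		mnl_attr_put(nlh, NFQA_L2HDR, l2len, l2hdr);
}

As the nfqa_parse_bridge() hunk below shows, an NFQA_L2HDR whose length does not match the queued packet's mac-to-network header gap is rejected with -EINVAL, so the header should be returned with its original length.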
-rw-r--r--	include/net/netfilter/nf_conntrack_ecache.h	108
-rw-r--r--	include/uapi/linux/netfilter/nfnetlink_queue.h	10
-rw-r--r--	net/bridge/netfilter/nf_tables_bridge.c	47
-rw-r--r--	net/ipv6/netfilter/ip6t_SYNPROXY.c	56
-rw-r--r--	net/ipv6/netfilter/nf_reject_ipv6.c	2
-rw-r--r--	net/netfilter/nf_conntrack_core.c	15
-rw-r--r--	net/netfilter/nf_conntrack_ecache.c	84
-rw-r--r--	net/netfilter/nfnetlink_queue.c	105
8 files changed, 298 insertions, 129 deletions
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 57c880378443..fa36447371c6 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -73,6 +73,8 @@ void nf_conntrack_unregister_notifier(struct net *net,
 					  struct nf_ct_event_notifier *nb);
 
 void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+				  u32 portid, int report);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -91,69 +93,25 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 }
 
 static inline int
-nf_conntrack_eventmask_report(unsigned int eventmask,
-			      struct nf_conn *ct,
-			      u32 portid,
-			      int report)
-{
-	int ret = 0;
-	struct net *net = nf_ct_net(ct);
-	struct nf_ct_event_notifier *notify;
-	struct nf_conntrack_ecache *e;
-
-	rcu_read_lock();
-	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
-	if (notify == NULL)
-		goto out_unlock;
-
-	e = nf_ct_ecache_find(ct);
-	if (e == NULL)
-		goto out_unlock;
-
-	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
-		struct nf_ct_event item = {
-			.ct	= ct,
-			.portid	= e->portid ? e->portid : portid,
-			.report = report
-		};
-		/* This is a resent of a destroy event? If so, skip missed */
-		unsigned long missed = e->portid ? 0 : e->missed;
-
-		if (!((eventmask | missed) & e->ctmask))
-			goto out_unlock;
-
-		ret = notify->fcn(eventmask | missed, &item);
-		if (unlikely(ret < 0 || missed)) {
-			spin_lock_bh(&ct->lock);
-			if (ret < 0) {
-				/* This is a destroy event that has been
-				 * triggered by a process, we store the PORTID
-				 * to include it in the retransmission. */
-				if (eventmask & (1 << IPCT_DESTROY) &&
-				    e->portid == 0 && portid != 0)
-					e->portid = portid;
-				else
-					e->missed |= eventmask;
-			} else
-				e->missed &= ~missed;
-			spin_unlock_bh(&ct->lock);
-		}
-	}
-out_unlock:
-	rcu_read_unlock();
-	return ret;
-}
-
-static inline int
 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
 			  u32 portid, int report)
 {
+	const struct net *net = nf_ct_net(ct);
+
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+		return 0;
+
 	return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 }
 
 static inline int
 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+	const struct net *net = nf_ct_net(ct);
+
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+		return 0;
+
 	return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
 }
 
@@ -172,43 +130,9 @@ int nf_ct_expect_register_notifier(struct net *net,
 void nf_ct_expect_unregister_notifier(struct net *net,
 				      struct nf_exp_event_notifier *nb);
 
-static inline void
-nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
-			  struct nf_conntrack_expect *exp,
-			  u32 portid,
-			  int report)
-{
-	struct net *net = nf_ct_exp_net(exp);
-	struct nf_exp_event_notifier *notify;
-	struct nf_conntrack_ecache *e;
-
-	rcu_read_lock();
-	notify = rcu_dereference(net->ct.nf_expect_event_cb);
-	if (notify == NULL)
-		goto out_unlock;
-
-	e = nf_ct_ecache_find(exp->master);
-	if (e == NULL)
-		goto out_unlock;
-
-	if (e->expmask & (1 << event)) {
-		struct nf_exp_event item = {
-			.exp	= exp,
-			.portid	= portid,
-			.report = report
-		};
-		notify->fcn(1 << event, &item);
-	}
-out_unlock:
-	rcu_read_unlock();
-}
-
-static inline void
-nf_ct_expect_event(enum ip_conntrack_expect_events event,
-		   struct nf_conntrack_expect *exp)
-{
-	nf_ct_expect_event_report(event, exp, 0, 0);
-}
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+			       struct nf_conntrack_expect *exp,
+			       u32 portid, int report);
 
 int nf_conntrack_ecache_pernet_init(struct net *net);
 void nf_conntrack_ecache_pernet_fini(struct net *net);
@@ -245,8 +169,6 @@ static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
 					    u32 portid,
 					    int report) { return 0; }
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
-				      struct nf_conntrack_expect *exp) {}
 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
 					     struct nf_conntrack_expect *exp,
 					     u32 portid,
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index b67a853638ff..ae30841ff94e 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -30,6 +30,14 @@ struct nfqnl_msg_packet_timestamp {
 	__aligned_be64	usec;
 };
 
+enum nfqnl_vlan_attr {
+	NFQA_VLAN_UNSPEC,
+	NFQA_VLAN_PROTO,		/* __be16 skb vlan_proto */
+	NFQA_VLAN_TCI,			/* __be16 skb htons(vlan_tci) */
+	__NFQA_VLAN_MAX,
+};
+#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX + 1)
+
 enum nfqnl_attr_type {
 	NFQA_UNSPEC,
 	NFQA_PACKET_HDR,
@@ -50,6 +58,8 @@ enum nfqnl_attr_type {
 	NFQA_UID,			/* __u32 sk uid */
 	NFQA_GID,			/* __u32 sk gid */
 	NFQA_SECCTX,			/* security context string */
+	NFQA_VLAN,			/* nested attribute: packet vlan info */
+	NFQA_L2HDR,			/* full L2 header */
 
 	__NFQA_MAX
 };
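A read-side sketch of the two attributes defined above, assuming libmnl and a caller that has already collected the top-level NFQA_* attributes of an NFQNL_MSG_PACKET into attr[]; the function names are illustrative only, not part of the patch:

#include <arpa/inet.h>
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_queue.h>

/* mnl_attr_parse_nested() callback for the NFQA_VLAN nest. */
static int vlan_attr_cb(const struct nlattr *attr, void *data)
{
	const struct nlattr **tb = data;

	if (mnl_attr_type_valid(attr, NFQA_VLAN_MAX) < 0)
		return MNL_CB_OK;	/* skip unknown attributes */
	if (mnl_attr_validate(attr, MNL_TYPE_U16) < 0)
		return MNL_CB_ERROR;
	tb[mnl_attr_get_type(attr)] = attr;
	return MNL_CB_OK;
}

/* attr[] holds the top-level NFQA_* attributes of one queued packet. */
static void show_bridge_info(const struct nlattr *attr[])
{
	if (attr[NFQA_VLAN]) {
		const struct nlattr *tb[NFQA_VLAN_MAX + 1] = { NULL };

		mnl_attr_parse_nested(attr[NFQA_VLAN], vlan_attr_cb, tb);
		if (tb[NFQA_VLAN_PROTO] && tb[NFQA_VLAN_TCI])
			printf("vlan proto 0x%04x tci 0x%04x\n",
			       ntohs(mnl_attr_get_u16(tb[NFQA_VLAN_PROTO])),
			       ntohs(mnl_attr_get_u16(tb[NFQA_VLAN_TCI])));
	}
	if (attr[NFQA_L2HDR])
		printf("l2 header of %u bytes\n",
		       (unsigned int)mnl_attr_get_payload_len(attr[NFQA_L2HDR]));
}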
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 7fcdd7261d88..a78c4e2826e5 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -162,15 +162,57 @@ static const struct nf_chain_type filter_bridge = {
 			  (1 << NF_BR_POST_ROUTING),
 };
 
+static void nf_br_saveroute(const struct sk_buff *skb,
+			    struct nf_queue_entry *entry)
+{
+}
+
+static int nf_br_reroute(struct net *net, struct sk_buff *skb,
+			 const struct nf_queue_entry *entry)
+{
+	return 0;
+}
+
+static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
+			      unsigned int dataoff, u_int8_t protocol)
+{
+	return 0;
+}
+
+static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
+				      unsigned int dataoff, unsigned int len,
+				      u_int8_t protocol)
+{
+	return 0;
+}
+
+static int nf_br_route(struct net *net, struct dst_entry **dst,
+		       struct flowi *fl, bool strict __always_unused)
+{
+	return 0;
+}
+
+static const struct nf_afinfo nf_br_afinfo = {
+	.family			= AF_BRIDGE,
+	.checksum		= nf_br_checksum,
+	.checksum_partial	= nf_br_checksum_partial,
+	.route			= nf_br_route,
+	.saveroute		= nf_br_saveroute,
+	.reroute		= nf_br_reroute,
+	.route_key_size		= 0,
+};
+
 static int __init nf_tables_bridge_init(void)
 {
 	int ret;
 
+	nf_register_afinfo(&nf_br_afinfo);
 	nft_register_chain_type(&filter_bridge);
 	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
-	if (ret < 0)
+	if (ret < 0) {
 		nft_unregister_chain_type(&filter_bridge);
-
+		nf_unregister_afinfo(&nf_br_afinfo);
+	}
 	return ret;
 }
 
@@ -178,6 +220,7 @@ static void __exit nf_tables_bridge_exit(void)
 {
 	unregister_pernet_subsys(&nf_tables_bridge_net_ops);
 	nft_unregister_chain_type(&filter_bridge);
+	nf_unregister_afinfo(&nf_br_afinfo);
 }
 
 module_init(nf_tables_bridge_init);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 3deed5860a42..5d778dd11f66 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -20,15 +20,16 @@
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct ipv6hdr *
-synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
-		  const struct in6_addr *daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb,
+		  const struct in6_addr *saddr,
+		  const struct in6_addr *daddr)
 {
 	struct ipv6hdr *iph;
 
 	skb_reset_network_header(skb);
 	iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
 	ip6_flow_hdr(iph, 0, 0);
-	iph->hop_limit	= 64;	//XXX
+	iph->hop_limit	= net->ipv6.devconf_all->hop_limit;
 	iph->nexthdr	= IPPROTO_TCP;
 	iph->saddr	= *saddr;
 	iph->daddr	= *daddr;
@@ -37,13 +38,12 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 
 static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
 		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct ipv6hdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
-	struct net *net = nf_ct_net(snet->tmpl);
 	struct dst_entry *dst;
 	struct flowi6 fl6;
 
@@ -84,7 +84,7 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
 			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
@@ -103,7 +103,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+	niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -121,15 +121,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct sk_buff *nskb;
 	struct ipv6hdr *iph, *niph;
 	struct tcphdr *nth;
@@ -144,7 +145,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+	niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -165,12 +166,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
 			 const struct ip_ct_tcp *state,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
@@ -189,7 +190,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+	niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -205,11 +206,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
 {
@@ -227,7 +228,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+	niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -243,15 +244,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	int mss;
 
 	mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -267,7 +269,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet,
 	if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
 		synproxy_check_timestamp_cookie(opts);
 
-	synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+	synproxy_send_server_syn(net, skb, th, opts, recv_seq);
 	return true;
 }
 
@@ -275,7 +277,8 @@ static unsigned int
 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_synproxy_info *info = par->targinfo;
-	struct synproxy_net *snet = synproxy_pernet(par->net);
+	struct net *net = par->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct synproxy_options opts = {};
 	struct tcphdr *th, _th;
 
@@ -304,12 +307,12 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(snet, skb, th, &opts);
+		synproxy_send_client_synack(net, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
 		/* ACK from client */
-		synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+		synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
 		return NF_DROP;
 	}
 
@@ -320,7 +323,8 @@ static unsigned int ipv6_synproxy_hook(void *priv,
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *nhs)
 {
-	struct synproxy_net *snet = synproxy_pernet(nhs->net);
+	struct net *net = nhs->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
 	struct nf_conn_synproxy *synproxy;
@@ -384,7 +388,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
 		 * therefore we need to add 1 to make the SYN sequence
 		 * number match the one of first SYN.
 		 */
-		if (synproxy_recv_client_ack(snet, skb, th, &opts,
+		if (synproxy_recv_client_ack(net, skb, th, &opts,
 					     ntohl(th->seq) + 1))
 			this_cpu_inc(snet->stats->cookie_retrans);
 
@@ -410,12 +414,12 @@ static unsigned int ipv6_synproxy_hook(void *priv,
 					  XT_SYNPROXY_OPT_SACK_PERM);
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_server_ack(snet, state, skb, th, &opts);
+		synproxy_send_server_ack(net, state, skb, th, &opts);
 
 		nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_client_ack(snet, skb, th, &opts);
+		synproxy_send_client_ack(net, skb, th, &opts);
 
 		consume_skb(skb);
 		return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 4709f657b7b6..a5400223fd74 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -158,7 +158,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 	fl6.fl6_dport = otcph->source;
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL || dst->error) {
+	if (dst->error) {
 		dst_release(dst);
 		return;
 	}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index afde5f5e728a..2fd607408998 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -12,6 +12,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/netfilter.h>
 #include <linux/module.h>
@@ -966,7 +968,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
 	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
 		nf_conntrack_free(ct);
-		pr_debug("init conntrack: can't track with proto module\n");
+		pr_debug("can't track with proto module\n");
 		return NULL;
 	}
 
@@ -988,7 +990,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	spin_lock(&nf_conntrack_expect_lock);
 	exp = nf_ct_find_expectation(net, zone, tuple);
 	if (exp) {
-		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+		pr_debug("expectation arrives ct=%p exp=%p\n",
 			 ct, exp);
 		/* Welcome, Mr. Bond. We've been expecting you... */
 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -1053,7 +1055,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
 			     dataoff, l3num, protonum, net, &tuple, l3proto,
 			     l4proto)) {
-		pr_debug("resolve_normal_ct: Can't get tuple\n");
+		pr_debug("Can't get tuple\n");
 		return NULL;
 	}
 
@@ -1079,14 +1081,13 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 	} else {
 		/* Once we've had two way comms, always ESTABLISHED. */
 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+			pr_debug("normal packet for %p\n", ct);
 			*ctinfo = IP_CT_ESTABLISHED;
 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: related packet for %p\n",
-				 ct);
+			pr_debug("related packet for %p\n", ct);
 			*ctinfo = IP_CT_RELATED;
 		} else {
-			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+			pr_debug("new packet for %p\n", ct);
 			*ctinfo = IP_CT_NEW;
 		}
 		*set_reply = 0;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 4e78c57b818f..d28011b42845 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -113,6 +113,60 @@ static void ecache_work(struct work_struct *work)
 		schedule_delayed_work(&ctnet->ecache_dwork, delay);
 }
 
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+				  u32 portid, int report)
+{
+	int ret = 0;
+	struct net *net = nf_ct_net(ct);
+	struct nf_ct_event_notifier *notify;
+	struct nf_conntrack_ecache *e;
+
+	rcu_read_lock();
+	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+	if (!notify)
+		goto out_unlock;
+
+	e = nf_ct_ecache_find(ct);
+	if (!e)
+		goto out_unlock;
+
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
+		struct nf_ct_event item = {
+			.ct	= ct,
+			.portid	= e->portid ? e->portid : portid,
+			.report = report
+		};
+		/* This is a resent of a destroy event? If so, skip missed */
+		unsigned long missed = e->portid ? 0 : e->missed;
+
+		if (!((eventmask | missed) & e->ctmask))
+			goto out_unlock;
+
+		ret = notify->fcn(eventmask | missed, &item);
+		if (unlikely(ret < 0 || missed)) {
+			spin_lock_bh(&ct->lock);
+			if (ret < 0) {
+				/* This is a destroy event that has been
+				 * triggered by a process, we store the PORTID
+				 * to include it in the retransmission.
+				 */
+				if (eventmask & (1 << IPCT_DESTROY) &&
+				    e->portid == 0 && portid != 0)
+					e->portid = portid;
+				else
+					e->missed |= eventmask;
+			} else {
+				e->missed &= ~missed;
+			}
+			spin_unlock_bh(&ct->lock);
+		}
+	}
+out_unlock:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
+
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
@@ -167,6 +221,36 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+			       struct nf_conntrack_expect *exp,
+			       u32 portid, int report)
+
+{
+	struct net *net = nf_ct_exp_net(exp);
+	struct nf_exp_event_notifier *notify;
+	struct nf_conntrack_ecache *e;
+
+	rcu_read_lock();
+	notify = rcu_dereference(net->ct.nf_expect_event_cb);
+	if (!notify)
+		goto out_unlock;
+
+	e = nf_ct_ecache_find(exp->master);
+	if (!e)
+		goto out_unlock;
+
+	if (e->expmask & (1 << event)) {
+		struct nf_exp_event item = {
+			.exp	= exp,
+			.portid	= portid,
+			.report = report
+		};
+		notify->fcn(1 << event, &item);
+	}
+out_unlock:
+	rcu_read_unlock();
+}
+
 int nf_conntrack_register_notifier(struct net *net,
 				   struct nf_ct_event_notifier *new)
 {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index cb5b630a645b..aa93877ab6e2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -295,6 +295,59 @@ static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
 	return seclen;
 }
 
+static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
+{
+	struct sk_buff *entskb = entry->skb;
+	u32 nlalen = 0;
+
+	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+		return 0;
+
+	if (skb_vlan_tag_present(entskb))
+		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
+					 nla_total_size(sizeof(__be16)));
+
+	if (entskb->network_header > entskb->mac_header)
+		nlalen += nla_total_size((entskb->network_header -
+					  entskb->mac_header));
+
+	return nlalen;
+}
+
+static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
+{
+	struct sk_buff *entskb = entry->skb;
+
+	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+		return 0;
+
+	if (skb_vlan_tag_present(entskb)) {
+		struct nlattr *nest;
+
+		nest = nla_nest_start(skb, NFQA_VLAN | NLA_F_NESTED);
+		if (!nest)
+			goto nla_put_failure;
+
+		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
+		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, nest);
+	}
+
+	if (entskb->mac_header < entskb->network_header) {
+		int len = (int)(entskb->network_header - entskb->mac_header);
+
+		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
+			goto nla_put_failure;
+	}
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
 static struct sk_buff *
 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 			   struct nf_queue_entry *entry,
@@ -334,6 +387,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 	if (entskb->tstamp.tv64)
 		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
+	size += nfqnl_get_bridge_size(entry);
+
 	if (entry->state.hook <= NF_INET_FORWARD ||
 	    (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 		csum_verify = !skb_csum_unnecessary(entskb);
@@ -497,6 +552,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 		}
 	}
 
+	if (nfqnl_put_bridge(entry, skb) < 0)
+		goto nla_put_failure;
+
 	if (entskb->tstamp.tv64) {
 		struct nfqnl_msg_packet_timestamp ts;
 		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
@@ -911,12 +969,18 @@ static struct notifier_block nfqnl_rtnl_notifier = {
 	.notifier_call	= nfqnl_rcv_nl_event,
 };
 
+static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
+	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
+	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
+};
+
 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
 	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
 	[NFQA_MARK]		= { .type = NLA_U32 },
 	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
 	[NFQA_CT]		= { .type = NLA_UNSPEC },
 	[NFQA_EXP]		= { .type = NLA_UNSPEC },
+	[NFQA_VLAN]		= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -1030,6 +1094,40 @@ static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
 	return ct;
 }
 
+static int nfqa_parse_bridge(struct nf_queue_entry *entry,
+			     const struct nlattr * const nfqa[])
+{
+	if (nfqa[NFQA_VLAN]) {
+		struct nlattr *tb[NFQA_VLAN_MAX + 1];
+		int err;
+
+		err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN],
+				       nfqa_vlan_policy);
+		if (err < 0)
+			return err;
+
+		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
+			return -EINVAL;
+
+		entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
+		entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
+	}
+
+	if (nfqa[NFQA_L2HDR]) {
+		int mac_header_len = entry->skb->network_header -
+			entry->skb->mac_header;
+
+		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
+			return -EINVAL;
+		else if (mac_header_len > 0)
+			memcpy(skb_mac_header(entry->skb),
+			       nla_data(nfqa[NFQA_L2HDR]),
+			       mac_header_len);
+	}
+
+	return 0;
+}
+
 static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 			      struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
@@ -1045,6 +1143,7 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 	struct nfnl_ct_hook *nfnl_ct;
 	struct nf_conn *ct = NULL;
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+	int err;
 
 	queue = instance_lookup(q, queue_num);
 	if (!queue)
@@ -1071,6 +1170,12 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 		ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
 	}
 
+	if (entry->state.pf == PF_BRIDGE) {
+		err = nfqa_parse_bridge(entry, nfqa);
+		if (err < 0)
+			return err;
+	}
+
 	if (nfqa[NFQA_PAYLOAD]) {
 		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
 		int diff = payload_len - entry->skb->len;