author     David S. Miller <davem@davemloft.net>    2018-10-22 23:21:30 -0400
committer  David S. Miller <davem@davemloft.net>    2018-10-22 23:21:30 -0400
commit     807192deb876d7550b5de7bf7a40ea3a4421ae0b (patch)
tree       8e9a907d89652e39f271d4de314bb7193e930f0f
parent     e929ceb66ac0faa3508702efbc5ea028387c6bb1 (diff)
parent     a3fb3698cadf27dc142b24394c401625e14d80d0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree:

1) rbtree lookup from control plane returns the left-hand side element
   of the range when the interval end flag is set on.

2) osf extension is not supported from the output path, reject this
   from the control plane, from Fernando Fernandez Mancera.

3) xt_TEE is leaving output interface unset due to a recent incorrect
   netns rework, from Taehee Yoo.

4) xt_TEE allows to select an interface which does not belong to this
   netnamespace, from Taehee Yoo.

5) Zero private extension area in nft_compat, just like we do in
   x_tables, otherwise we leak kernel memory to userspace.

6) Missing .checkentry and .destroy entries in new DNAT extensions
   break it since we never load nf_conntrack dependencies, from
   Paolo Abeni.

7) Do not remove flowtable hook from netns exit path, the netdevice
   handler already deals with this, also from Taehee Yoo.

8) Only cleanup flowtable entries that reside in this netnamespace,
   also from Taehee Yoo.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/netfilter/nf_flow_table_core.c  |  9
-rw-r--r--  net/netfilter/nf_tables_api.c       |  3
-rw-r--r--  net/netfilter/nft_compat.c          | 24
-rw-r--r--  net/netfilter/nft_osf.c             | 10
-rw-r--r--  net/netfilter/nft_set_rbtree.c      | 10
-rw-r--r--  net/netfilter/xt_TEE.c              | 76
-rw-r--r--  net/netfilter/xt_nat.c              |  2
7 files changed, 107 insertions, 27 deletions
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index a3cc2ef8a48a..b7a4816add76 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -465,14 +465,17 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
 static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 {
         struct net_device *dev = data;
+        struct flow_offload_entry *e;
+
+        e = container_of(flow, struct flow_offload_entry, flow);
 
         if (!dev) {
                 flow_offload_teardown(flow);
                 return;
         }
-
-        if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
-            flow->tuplehash[1].tuple.iifidx == dev->ifindex)
+        if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
+             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                 flow_offload_dead(flow);
 }
 
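Fix #8 in the cover letter is the net_eq() check added here: a device going down in one namespace must not kill flow entries that belong to another. Below is a standalone sketch of that predicate on mock structures; the type and field names (mock_flow, netns_id, iifidx) are mine, not the kernel's.

/*
 * Standalone sketch (not kernel code): mimics the cleanup predicate above
 * on mock structures with made-up field names.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_flow {
        int netns_id;      /* stands in for nf_ct_net(e->ct) */
        int iifidx[2];     /* stands in for tuplehash[0/1].tuple.iifidx */
};

/* Return true if the flow should be marked dead for a device going down. */
static bool flow_matches_dev(const struct mock_flow *flow,
                             int dev_netns_id, int dev_ifindex)
{
        /* Only touch flows that live in the same namespace as the device. */
        if (flow->netns_id != dev_netns_id)
                return false;
        return flow->iifidx[0] == dev_ifindex || flow->iifidx[1] == dev_ifindex;
}

int main(void)
{
        struct mock_flow f = { .netns_id = 1, .iifidx = { 3, 7 } };

        /* Same ifindex but a different namespace: must be left alone. */
        printf("%d\n", flow_matches_dev(&f, 2, 3));     /* 0 */
        printf("%d\n", flow_matches_dev(&f, 1, 3));     /* 1 */
        return 0;
}

The first call returns 0 purely because the namespaces differ, which is the behaviour this hunk adds.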
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index f0159eea2978..42487d01a3ed 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -7280,9 +7280,6 @@ static void __nft_release_tables(struct net *net)
 
                 list_for_each_entry(chain, &table->chains, list)
                         nf_tables_unregister_hook(net, table, chain);
-                list_for_each_entry(flowtable, &table->flowtables, list)
-                        nf_unregister_net_hooks(net, flowtable->ops,
-                                                flowtable->ops_len);
                 /* No packets are walking on these chains anymore. */
                 ctx.table = table;
                 list_for_each_entry(chain, &table->chains, list) {
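Fix #7: the flowtable hooks are already torn down by the netdevice event handler, so repeating the unregistration in the netns exit path operates on hooks that are gone. The toy sketch below only illustrates that single-owner teardown idea with stand-in names; it is not nf_tables code.

/*
 * Sketch of the double-teardown hazard fixed above: if two exit paths
 * both try to unregister the same hook, the second call acts on a hook
 * that no longer exists. Plain userspace stand-in, not nf_tables.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_hook {
        bool registered;
};

static void unregister_hook(struct mock_hook *h, const char *who)
{
        if (!h->registered) {
                printf("%s: hook already unregistered, skipping\n", who);
                return;
        }
        h->registered = false;
        printf("%s: hook unregistered\n", who);
}

int main(void)
{
        struct mock_hook h = { .registered = true };

        /* The netdevice-event handler tears the hook down first ... */
        unregister_hook(&h, "netdev handler");
        /* ... so the netns exit path must not try to do it again. */
        unregister_hook(&h, "netns exit");
        return 0;
}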
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 32535eea51b2..768292eac2a4 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -290,6 +290,24 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
         module_put(target->me);
 }
 
+static int nft_extension_dump_info(struct sk_buff *skb, int attr,
+                                   const void *info,
+                                   unsigned int size, unsigned int user_size)
+{
+        unsigned int info_size, aligned_size = XT_ALIGN(size);
+        struct nlattr *nla;
+
+        nla = nla_reserve(skb, attr, aligned_size);
+        if (!nla)
+                return -1;
+
+        info_size = user_size ? : size;
+        memcpy(nla_data(nla), info, info_size);
+        memset(nla_data(nla) + info_size, 0, aligned_size - info_size);
+
+        return 0;
+}
+
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
         const struct xt_target *target = expr->ops->data;
@@ -297,7 +315,8 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
 
         if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
             nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
-            nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info))
+            nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
+                                    target->targetsize, target->usersize))
                 goto nla_put_failure;
 
         return 0;
@@ -532,7 +551,8 @@ static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
 
         if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
             nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
-            nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info))
+            nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
+                                    match->matchsize, match->usersize))
                 goto nla_put_failure;
 
         return 0;
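Fix #5: nft_extension_dump_info() copies only the user-visible part of the extension area into the attribute and zeroes the rest up to the aligned size, so padding and kernel-private tail bytes no longer leak to userspace. A minimal userspace sketch of the same copy-then-zero pattern, with a made-up MOCK_ALIGN() in place of XT_ALIGN() and a plain buffer in place of the nlattr:

/*
 * Userspace sketch of the copy-then-zero pattern used by
 * nft_extension_dump_info() above; the ALIGN macro and buffers are
 * simplified stand-ins, not the kernel/netlink API.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MOCK_ALIGN(len) (((len) + 7u) & ~7u)    /* stand-in for XT_ALIGN() */

/* Copy only the user-visible part and clear the padding/private tail. */
static void dump_info(uint8_t *dst, const void *info,
                      unsigned int size, unsigned int user_size)
{
        unsigned int aligned_size = MOCK_ALIGN(size);
        unsigned int info_size = user_size ? user_size : size;

        memcpy(dst, info, info_size);
        memset(dst + info_size, 0, aligned_size - info_size);
}

int main(void)
{
        uint8_t secret[12];
        uint8_t out[16];

        memset(secret, 0xaa, sizeof(secret));   /* pretend 0xaa is kernel-private */
        memset(out, 0xff, sizeof(out));

        dump_info(out, secret, sizeof(secret), 8);      /* only 8 bytes are user-visible */
        for (unsigned int i = 0; i < MOCK_ALIGN(sizeof(secret)); i++)
                printf("%02x ", out[i]);                /* bytes 8..15 print as 00 */
        printf("\n");
        return 0;
}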
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 0b452fd470c4..ca5e5d8c5ef8 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -82,6 +82,15 @@ nla_put_failure:
         return -1;
 }
 
+static int nft_osf_validate(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nft_data **data)
+{
+        return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+                                        (1 << NF_INET_PRE_ROUTING) |
+                                        (1 << NF_INET_FORWARD));
+}
+
 static struct nft_expr_type nft_osf_type;
 static const struct nft_expr_ops nft_osf_op = {
         .eval           = nft_osf_eval,
@@ -89,6 +98,7 @@ static const struct nft_expr_ops nft_osf_op = {
         .init           = nft_osf_init,
         .dump           = nft_osf_dump,
         .type           = &nft_osf_type,
+        .validate       = nft_osf_validate,
 };
 
 static struct nft_expr_type nft_osf_type __read_mostly = {
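Fix #2: nft_osf_validate() limits the expression to the prerouting, input and forward hooks, so output-path usage is rejected from the control plane. A minimal sketch of that hook-mask check with invented enum values and helper names (nothing here is the nft API):

/*
 * Minimal sketch of hook-mask validation in the spirit of
 * nft_osf_validate() above; the enum and helper are illustrative only.
 */
#include <stdio.h>

enum mock_hook {
        MOCK_PRE_ROUTING,
        MOCK_LOCAL_IN,
        MOCK_FORWARD,
        MOCK_LOCAL_OUT,
        MOCK_POST_ROUTING,
};

/* Accept the chain only if its hook bit is within the allowed mask. */
static int validate_hooks(unsigned int chain_hook, unsigned int allowed_mask)
{
        return ((1u << chain_hook) & allowed_mask) ? 0 : -1;
}

int main(void)
{
        unsigned int osf_ok = (1u << MOCK_LOCAL_IN) |
                              (1u << MOCK_PRE_ROUTING) |
                              (1u << MOCK_FORWARD);

        printf("%d\n", validate_hooks(MOCK_PRE_ROUTING, osf_ok));      /*  0 */
        printf("%d\n", validate_hooks(MOCK_LOCAL_OUT, osf_ok));        /* -1 */
        return 0;
}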
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 0e5ec126f6ad..fa61208371f8 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -135,9 +135,12 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                 d = memcmp(this, key, set->klen);
                 if (d < 0) {
                         parent = rcu_dereference_raw(parent->rb_left);
-                        interval = rbe;
+                        if (!(flags & NFT_SET_ELEM_INTERVAL_END))
+                                interval = rbe;
                 } else if (d > 0) {
                         parent = rcu_dereference_raw(parent->rb_right);
+                        if (flags & NFT_SET_ELEM_INTERVAL_END)
+                                interval = rbe;
                 } else {
                         if (!nft_set_elem_active(&rbe->ext, genmask))
                                 parent = rcu_dereference_raw(parent->rb_left);
@@ -154,7 +157,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
         if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
             nft_set_elem_active(&interval->ext, genmask) &&
-            !nft_rbtree_interval_end(interval)) {
+            ((!nft_rbtree_interval_end(interval) &&
+              !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
+             (nft_rbtree_interval_end(interval) &&
+              (flags & NFT_SET_ELEM_INTERVAL_END)))) {
                 *elem = interval;
                 return true;
         }
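Fix #1: an interval lives in the set as a start element and an end element, and a control-plane get must honour NFT_SET_ELEM_INTERVAL_END instead of always reporting the left-hand (start) element. The sketch below replays that selection rule over a plain sorted array; the names (boundary, is_end, lookup) are made up and this is only an analogy for the descent above, not rbtree code.

/*
 * Userspace analogy for the "which boundary do we want?" logic added to
 * __nft_rbtree_get(): a range is stored as a start element and an end
 * element, and the caller's flag picks which one a lookup returns.
 */
#include <stdbool.h>
#include <stdio.h>

struct boundary {
        int key;
        bool is_end;    /* stands in for nft_rbtree_interval_end() */
};

/* Return the index of the matching boundary <= key, or -1. */
static int lookup(const struct boundary *b, int n, int key, bool want_end)
{
        int best = -1;

        for (int i = 0; i < n && b[i].key <= key; i++) {
                /* Only remember candidates of the kind the caller asked for. */
                if (b[i].is_end == want_end)
                        best = i;
        }
        return best;
}

int main(void)
{
        /* One interval, stored as a start and an end boundary. */
        const struct boundary set[] = {
                { 10, false },  /* start of the range */
                { 20, true },   /* end of the range */
        };

        printf("%d\n", lookup(set, 2, 20, false));      /* 0: start element */
        printf("%d\n", lookup(set, 2, 20, true));       /* 1: end element   */
        return 0;
}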
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 0d0d68c989df..1dae02a97ee3 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -14,6 +14,8 @@
 #include <linux/skbuff.h>
 #include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/route.h>
 #include <net/netfilter/ipv4/nf_dup_ipv4.h>
 #include <net/netfilter/ipv6/nf_dup_ipv6.h>
@@ -25,8 +27,15 @@ struct xt_tee_priv {
         int oif;
 };
 
+static unsigned int tee_net_id __read_mostly;
 static const union nf_inet_addr tee_zero_address;
 
+struct tee_net {
+        struct list_head priv_list;
+        /* lock protects the priv_list */
+        struct mutex lock;
+};
+
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -51,17 +60,16 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 }
 #endif
 
-static DEFINE_MUTEX(priv_list_mutex);
-static LIST_HEAD(priv_list);
-
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
 {
         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+        struct net *net = dev_net(dev);
+        struct tee_net *tn = net_generic(net, tee_net_id);
         struct xt_tee_priv *priv;
 
-        mutex_lock(&priv_list_mutex);
-        list_for_each_entry(priv, &priv_list, list) {
+        mutex_lock(&tn->lock);
+        list_for_each_entry(priv, &tn->priv_list, list) {
                 switch (event) {
                 case NETDEV_REGISTER:
                         if (!strcmp(dev->name, priv->tginfo->oif))
@@ -79,13 +87,14 @@ static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                         break;
                 }
         }
-        mutex_unlock(&priv_list_mutex);
+        mutex_unlock(&tn->lock);
 
         return NOTIFY_DONE;
 }
 
 static int tee_tg_check(const struct xt_tgchk_param *par)
 {
+        struct tee_net *tn = net_generic(par->net, tee_net_id);
         struct xt_tee_tginfo *info = par->targinfo;
         struct xt_tee_priv *priv;
 
@@ -95,6 +104,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
                 return -EINVAL;
 
         if (info->oif[0]) {
+                struct net_device *dev;
+
                 if (info->oif[sizeof(info->oif)-1] != '\0')
                         return -EINVAL;
 
@@ -106,9 +117,14 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
                 priv->oif = -1;
                 info->priv = priv;
 
-                mutex_lock(&priv_list_mutex);
-                list_add(&priv->list, &priv_list);
-                mutex_unlock(&priv_list_mutex);
+                dev = dev_get_by_name(par->net, info->oif);
+                if (dev) {
+                        priv->oif = dev->ifindex;
+                        dev_put(dev);
+                }
+                mutex_lock(&tn->lock);
+                list_add(&priv->list, &tn->priv_list);
+                mutex_unlock(&tn->lock);
         } else
                 info->priv = NULL;
 
@@ -118,12 +134,13 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
 
 static void tee_tg_destroy(const struct xt_tgdtor_param *par)
 {
+        struct tee_net *tn = net_generic(par->net, tee_net_id);
         struct xt_tee_tginfo *info = par->targinfo;
 
         if (info->priv) {
-                mutex_lock(&priv_list_mutex);
+                mutex_lock(&tn->lock);
                 list_del(&info->priv->list);
-                mutex_unlock(&priv_list_mutex);
+                mutex_unlock(&tn->lock);
                 kfree(info->priv);
         }
         static_key_slow_dec(&xt_tee_enabled);
@@ -156,6 +173,21 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
 #endif
 };
 
+static int __net_init tee_net_init(struct net *net)
+{
+        struct tee_net *tn = net_generic(net, tee_net_id);
+
+        INIT_LIST_HEAD(&tn->priv_list);
+        mutex_init(&tn->lock);
+        return 0;
+}
+
+static struct pernet_operations tee_net_ops = {
+        .init = tee_net_init,
+        .id   = &tee_net_id,
+        .size = sizeof(struct tee_net),
+};
+
 static struct notifier_block tee_netdev_notifier = {
         .notifier_call = tee_netdev_event,
 };
@@ -164,22 +196,32 @@ static int __init tee_tg_init(void)
 {
         int ret;
 
-        ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-        if (ret)
+        ret = register_pernet_subsys(&tee_net_ops);
+        if (ret < 0)
                 return ret;
 
+        ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+        if (ret < 0)
+                goto cleanup_subsys;
+
         ret = register_netdevice_notifier(&tee_netdev_notifier);
-        if (ret) {
-                xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-                return ret;
-        }
+        if (ret < 0)
+                goto unregister_targets;
 
         return 0;
+
+unregister_targets:
+        xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+cleanup_subsys:
+        unregister_pernet_subsys(&tee_net_ops);
+        return ret;
 }
 
 static void __exit tee_tg_exit(void)
 {
         unregister_netdevice_notifier(&tee_netdev_notifier);
         xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+        unregister_pernet_subsys(&tee_net_ops);
 }
 
 module_init(tee_tg_init);
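Fixes #3 and #4: xt_TEE now keeps its priv list per network namespace (struct tee_net via net_generic()), resolves the configured oif with dev_get_by_name() in the rule's own netns, and tee_tg_init() unwinds in reverse order on failure. The standalone sketch below shows only that register-then-unwind ordering with stub functions of my own; it is not the kernel's module init.

/*
 * Standalone sketch of the registration/unwind ordering used in
 * tee_tg_init() above: acquire resources in order, and on failure undo
 * only what was already set up, in reverse. The register and unregister
 * helpers here are stubs, not the kernel APIs.
 */
#include <stdio.h>

static int register_pernet(void)      { puts("pernet up");   return 0; }
static void unregister_pernet(void)   { puts("pernet down");           }
static int register_targets(void)     { puts("targets up");  return 0; }
static void unregister_targets(void)  { puts("targets down");          }
static int register_notifier(void)    { puts("notifier up"); return -1; } /* simulate failure */

static int module_init_sketch(void)
{
        int ret;

        ret = register_pernet();
        if (ret < 0)
                return ret;

        ret = register_targets();
        if (ret < 0)
                goto cleanup_pernet;

        ret = register_notifier();
        if (ret < 0)
                goto cleanup_targets;

        return 0;

cleanup_targets:
        unregister_targets();
cleanup_pernet:
        unregister_pernet();
        return ret;
}

int main(void)
{
        printf("init: %d\n", module_init_sketch());
        return 0;
}

With register_notifier() forced to fail, the targets and the pernet state are undone in reverse order, mirroring the goto labels in tee_tg_init().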
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 8af9707f8789..ac91170fc8c8 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -216,6 +216,8 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
         {
                 .name           = "DNAT",
                 .revision       = 2,
+                .checkentry     = xt_nat_checkentry,
+                .destroy        = xt_nat_destroy,
                 .target         = xt_dnat_target_v2,
                 .targetsize     = sizeof(struct nf_nat_range2),
                 .table          = "nat",