author     David S. Miller <davem@davemloft.net>  2014-09-26 16:21:29 -0400
committer  David S. Miller <davem@davemloft.net>  2014-09-26 16:21:29 -0400
commit     e7af85db54430c7cb7e15de9b3e0f72074d94dfb
tree       4827f6435c305b95ae7a1b46d3a8ab230291d25a
parent     445f7f4d62628cb2971db884084d162ecb622ec7
parent     679ab4ddbdfab8af39104e63819db71f428aefd9
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:
====================
nf pull request for net
This series contains netfilter fixes for net; they are:

1) Fix a lockdep splat in nft_hash when releasing sets from the
   rcu_callback context. We don't hold the mutex there anymore
   (illustrated in the sketch below).
2) Remove an unnecessary spin_lock_bh in the destroy path of the
   nf_tables rbtree set type, which also runs from rcu_callback
   context.
3) Fix another lockdep splat in rhashtable. None of the callers hold
   a mutex when calling rhashtable_destroy.
4) Fix duplicated error reporting from nfnetlink when aborting and
   replaying a batch.
5) Fix a Kconfig issue reported by the kbuild robot.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
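The common thread in fixes 1) and 3) is that set teardown now runs from rcu_callback context, where the nfnl mutex is no longer held, so accessors that assert lock ownership through lockdep (such as rht_dereference()) trigger a splat even though the access is safe. The standalone C sketch below models that situation with a plain assert(); every identifier in it is a hypothetical stand-in, not a kernel API.

```c
/*
 * Toy userspace model of the lockdep problem behind fixes 1) and 3).
 * A "checked" accessor asserts that the update-side mutex is held; a
 * teardown path that runs after all users are gone (the analogue of an
 * rcu_callback) never takes that mutex, so it has to bypass the checked
 * accessor and free the table directly.  All names are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct table {
    size_t size;
    int *buckets;
};

struct hashtable {
    bool mutex_held;        /* stand-in for lockdep_is_held() */
    struct table *tbl;
};

/* Analogue of rht_dereference(): only legal while the mutex is held. */
static struct table *checked_deref(struct hashtable *ht)
{
    assert(ht->mutex_held); /* this assert is the "lockdep splat" */
    return ht->tbl;
}

/* Destroy path: no mutex taken; exclusive access is guaranteed by the
 * caller because nothing can resize or look up the table anymore. */
static void table_destroy(struct hashtable *ht)
{
    struct table *tbl = ht->tbl;    /* direct access, no assertion */

    free(tbl->buckets);
    free(tbl);
    ht->tbl = NULL;
}

int main(void)
{
    struct hashtable ht = { .mutex_held = false, .tbl = NULL };

    ht.tbl = malloc(sizeof(*ht.tbl));
    ht.tbl->size = 8;
    ht.tbl->buckets = calloc(ht.tbl->size, sizeof(int));

    /* Normal update-side use: "take" the mutex, then dereference. */
    ht.mutex_held = true;
    assert(checked_deref(&ht)->size == 8);
    ht.mutex_held = false;

    /* Calling checked_deref(&ht) here would abort, just as the old
     * destroy paths tripped lockdep; the fixed code goes straight to
     * the pointer instead. */
    table_destroy(&ht);
    printf("table torn down without the mutex held\n");
    return 0;
}
```

The rhashtable hunk below takes exactly this route: rhashtable_destroy() now frees ht->tbl directly, and its comment makes the caller responsible for unpublishing the table and waiting out the grace period first.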
-rw-r--r--  lib/rhashtable.c           |  8
-rw-r--r--  net/netfilter/Kconfig      |  1
-rw-r--r--  net/netfilter/nfnetlink.c  | 64
-rw-r--r--  net/netfilter/nft_hash.c   | 12
-rw-r--r--  net/netfilter/nft_rbtree.c |  2
5 files changed, 75 insertions(+), 12 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7b36e4d40ed7..16d02639d334 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -588,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
  * rhashtable_destroy - destroy hash table
  * @ht: the hash table to destroy
  *
- * Frees the bucket array.
+ * Frees the bucket array. This function is not rcu safe, therefore the caller
+ * has to make sure that no resizing may happen by unpublishing the hashtable
+ * and waiting for the quiescent cycle before releasing the bucket array.
  */
 void rhashtable_destroy(const struct rhashtable *ht)
 {
-        const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
-
-        bucket_table_free(tbl);
+        bucket_table_free(ht->tbl);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index b5c1d3aadb41..6d77cce481d5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
         tristate '"TPROXY" target transparent proxying support'
         depends on NETFILTER_XTABLES
         depends on NETFILTER_ADVANCED
+        depends on (IPV6 || IPV6=n)
         depends on IP_NF_MANGLE
         select NF_DEFRAG_IPV4
         select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index c138b8fbe280..f37f0716a9fc 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -222,6 +222,51 @@ replay:
         }
 }
 
+struct nfnl_err {
+        struct list_head head;
+        struct nlmsghdr *nlh;
+        int err;
+};
+
+static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
+{
+        struct nfnl_err *nfnl_err;
+
+        nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
+        if (nfnl_err == NULL)
+                return -ENOMEM;
+
+        nfnl_err->nlh = nlh;
+        nfnl_err->err = err;
+        list_add_tail(&nfnl_err->head, list);
+
+        return 0;
+}
+
+static void nfnl_err_del(struct nfnl_err *nfnl_err)
+{
+        list_del(&nfnl_err->head);
+        kfree(nfnl_err);
+}
+
+static void nfnl_err_reset(struct list_head *err_list)
+{
+        struct nfnl_err *nfnl_err, *next;
+
+        list_for_each_entry_safe(nfnl_err, next, err_list, head)
+                nfnl_err_del(nfnl_err);
+}
+
+static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
+{
+        struct nfnl_err *nfnl_err, *next;
+
+        list_for_each_entry_safe(nfnl_err, next, err_list, head) {
+                netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
+                nfnl_err_del(nfnl_err);
+        }
+}
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                 u_int16_t subsys_id)
 {
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
         const struct nfnetlink_subsystem *ss;
         const struct nfnl_callback *nc;
         bool success = true, done = false;
+        static LIST_HEAD(err_list);
         int err;
 
         if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ replay:
                 type = nlh->nlmsg_type;
                 if (type == NFNL_MSG_BATCH_BEGIN) {
                         /* Malformed: Batch begin twice */
+                        nfnl_err_reset(&err_list);
                         success = false;
                         goto done;
                 } else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ replay:
                          * original skb.
                          */
                         if (err == -EAGAIN) {
+                                nfnl_err_reset(&err_list);
                                 ss->abort(skb);
                                 nfnl_unlock(subsys_id);
                                 kfree_skb(nskb);
@@ -341,11 +389,24 @@ replay:
                 }
 ack:
                 if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+                        /* Errors are delivered once the full batch has been
+                         * processed, this avoids that the same error is
+                         * reported several times when replaying the batch.
+                         */
+                        if (nfnl_err_add(&err_list, nlh, err) < 0) {
+                                /* We failed to enqueue an error, reset the
+                                 * list of errors and send OOM to userspace
+                                 * pointing to the batch header.
+                                 */
+                                nfnl_err_reset(&err_list);
+                                netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+                                success = false;
+                                goto done;
+                        }
                         /* We don't stop processing the batch on errors, thus,
                          * userspace gets all the errors that the batch
                          * triggers.
                          */
-                        netlink_ack(skb, nlh, err);
                         if (err)
                                 success = false;
                 }
@@ -361,6 +422,7 @@ done:
         else
                 ss->abort(skb);
 
+        nfnl_err_deliver(&err_list, oskb);
         nfnl_unlock(subsys_id);
         kfree_skb(nskb);
 }
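For reference, here is a minimal standalone C sketch of the bookkeeping the new nfnl_err_* helpers implement: errors are queued while the batch is processed, dropped whenever the batch is about to be replayed, and acknowledged only once at the end. The list and the printf "ack" are simplified stand-ins for the kernel's list_head and netlink_ack(), not the real API.

```c
/*
 * Simplified userspace sketch of the deferred-error scheme added to
 * nfnetlink_rcv_batch(): instead of acking each failing message
 * immediately (which duplicates errors when the batch is replayed),
 * errors are queued and delivered once after the final commit/abort.
 * Types and names below are illustrative, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

struct batch_err {
    struct batch_err *next;
    unsigned int seq;   /* stand-in for the nlmsghdr that failed */
    int err;
};

/* Queue one error; returns -1 on allocation failure (the kernel code
 * then acks -ENOMEM against the batch header and gives up).
 * Note: this sketch prepends for brevity; the kernel appends at the
 * tail so errors are reported in message order. */
static int err_add(struct batch_err **list, unsigned int seq, int err)
{
    struct batch_err *e = malloc(sizeof(*e));

    if (e == NULL)
        return -1;
    e->seq = seq;
    e->err = err;
    e->next = *list;
    *list = e;
    return 0;
}

/* Drop all queued errors, e.g. before replaying the batch. */
static void err_reset(struct batch_err **list)
{
    while (*list) {
        struct batch_err *e = *list;

        *list = e->next;
        free(e);
    }
}

/* Deliver (here: print) and free all queued errors once the batch is done. */
static void err_deliver(struct batch_err **list)
{
    while (*list) {
        struct batch_err *e = *list;

        printf("ack: message %u -> error %d\n", e->seq, e->err);
        *list = e->next;
        free(e);
    }
}

int main(void)
{
    struct batch_err *errs = NULL;

    /* First pass over the batch: message 2 fails. */
    err_add(&errs, 2, -2);

    /* The batch has to be replayed: forget the queued errors so they
     * are not reported twice. */
    err_reset(&errs);

    /* Replay: the same message fails again, queued exactly once. */
    err_add(&errs, 2, -2);

    /* Batch finished (commit or abort): deliver everything now. */
    err_deliver(&errs);
    return 0;
}
```

The key property mirrored here is that the -EAGAIN replay path empties the queue, so userspace sees each failure exactly once no matter how many times the batch is replayed.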
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 28fb8f38e6ba..8892b7b6184a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
         const struct rhashtable *priv = nft_set_priv(set);
-        const struct bucket_table *tbl;
+        const struct bucket_table *tbl = priv->tbl;
         struct nft_hash_elem *he, *next;
         unsigned int i;
 
-        tbl = rht_dereference(priv->tbl, priv);
-        for (i = 0; i < tbl->size; i++)
-                rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
+        for (i = 0; i < tbl->size; i++) {
+                for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
+                     he != NULL; he = next) {
+                        next = rht_entry(he->node.next, struct nft_hash_elem, node);
                         nft_hash_elem_destroy(set, he);
-
+                }
+        }
         rhashtable_destroy(priv);
 }
 
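The rewritten loop above open-codes what rht_for_each_entry_safe() did, minus the lockdep-checked dereference: walk every bucket and remember the next element before destroying the current one. A minimal standalone C version of that teardown pattern, using purely illustrative types, looks like this:

```c
/*
 * Standalone sketch of the "save next, then free" teardown used by the
 * new nft_hash_destroy() loop.  The hash table here is just an array of
 * singly linked buckets; all types are illustrative, not kernel ones.
 */
#include <stdio.h>
#include <stdlib.h>

struct elem {
    struct elem *next;
    int key;
};

struct table {
    size_t size;
    struct elem **buckets;
};

static void table_destroy(struct table *tbl)
{
    size_t i;

    for (i = 0; i < tbl->size; i++) {
        struct elem *he, *next;

        /* Fetch the successor before freeing the current element,
         * otherwise he->next would be read from freed memory. */
        for (he = tbl->buckets[i]; he != NULL; he = next) {
            next = he->next;
            free(he);
        }
    }
    free(tbl->buckets);
    free(tbl);
}

int main(void)
{
    struct table *tbl = malloc(sizeof(*tbl));
    size_t i;

    tbl->size = 4;
    tbl->buckets = calloc(tbl->size, sizeof(*tbl->buckets));

    /* Populate a few buckets. */
    for (i = 0; i < 6; i++) {
        struct elem *e = malloc(sizeof(*e));

        e->key = (int)i;
        e->next = tbl->buckets[i % tbl->size];
        tbl->buckets[i % tbl->size] = e;
    }

    table_destroy(tbl);
    printf("all buckets freed\n");
    return 0;
}
```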
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index e1836ff88199..46214f245665 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
         struct nft_rbtree_elem *rbe;
         struct rb_node *node;
 
-        spin_lock_bh(&nft_rbtree_lock);
         while ((node = priv->root.rb_node) != NULL) {
                 rb_erase(node, &priv->root);
                 rbe = rb_entry(node, struct nft_rbtree_elem, node);
                 nft_rbtree_elem_destroy(set, rbe);
         }
-        spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
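The loop that remains is safe without the spinlock because, by the time the destroy path runs, nothing else can reach the set. For comparison, a lock-free iterative teardown of a plain binary search tree can be written in the same spirit, with no recursion and no extra memory; this standalone C sketch is only an illustration of the idea, not the kernel's rbtree code:

```c
/*
 * Illustrative only: iterative, lock-free destruction of a plain binary
 * search tree, in the spirit of the nft_rbtree_destroy() loop above.
 * No lock is needed because the caller holds the last reference.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *left, *right;
    int key;
};

static struct node *node_new(int key)
{
    struct node *n = calloc(1, sizeof(*n));

    n->key = key;
    return n;
}

static void insert(struct node **root, struct node *n)
{
    while (*root)
        root = (n->key < (*root)->key) ? &(*root)->left : &(*root)->right;
    *root = n;
}

/* Repeatedly detach nodes starting from the root: a root without a left
 * child can be freed directly; otherwise rotate its left child up first.
 * Every node is freed after O(n) total rotations. */
static void tree_destroy(struct node *root)
{
    while (root) {
        if (root->left == NULL) {
            struct node *right = root->right;

            free(root);
            root = right;
        } else {
            struct node *left = root->left;

            root->left = left->right;
            left->right = root;
            root = left;
        }
    }
}

int main(void)
{
    struct node *root = NULL;
    int keys[] = { 5, 3, 8, 1, 4, 7, 9 };
    size_t i;

    for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
        insert(&root, node_new(keys[i]));

    tree_destroy(root);
    printf("tree destroyed without taking any lock\n");
    return 0;
}
```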