diff options
author | David S. Miller <davem@davemloft.net> | 2017-09-20 19:08:23 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-09-20 19:08:23 -0400 |
commit | 4c4d11b913ccef0eee8d32e5974ae26617676b77 (patch) | |
tree | 683001bde3d4df6b9eaa463bf050d530d92ed511 /net | |
parent | 02388bf87f72e1d47174cd8f81c34443920eb5a0 (diff) | |
parent | 7f4f7dd4417d9efd038b14d39c70170db2e0baa0 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:
====================
Netfilter fixes for net
The following patchset contains two Netfilter fixes for your net tree,
they are:
1) Fix NAT compilation with UP, from Geert Uytterhoeven.
2) Fix incorrect number of entries when dumping a set, from
Vishwanath Pai.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/netfilter/ipset/ip_set_hash_gen.h | 14 | ||||
-rw-r--r-- | net/netfilter/nf_nat_core.c | 12 |
2 files changed, 19 insertions, 7 deletions
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index f236c0bc7b3f..51063d9ed0f7 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
@@ -1041,12 +1041,24 @@ out: | |||
1041 | static int | 1041 | static int |
1042 | mtype_head(struct ip_set *set, struct sk_buff *skb) | 1042 | mtype_head(struct ip_set *set, struct sk_buff *skb) |
1043 | { | 1043 | { |
1044 | const struct htype *h = set->data; | 1044 | struct htype *h = set->data; |
1045 | const struct htable *t; | 1045 | const struct htable *t; |
1046 | struct nlattr *nested; | 1046 | struct nlattr *nested; |
1047 | size_t memsize; | 1047 | size_t memsize; |
1048 | u8 htable_bits; | 1048 | u8 htable_bits; |
1049 | 1049 | ||
1050 | /* If any members have expired, set->elements will be wrong | ||
1051 | * mytype_expire function will update it with the right count. | ||
1052 | * we do not hold set->lock here, so grab it first. | ||
1053 | * set->elements can still be incorrect in the case of a huge set, | ||
1054 | * because elements might time out during the listing. | ||
1055 | */ | ||
1056 | if (SET_WITH_TIMEOUT(set)) { | ||
1057 | spin_lock_bh(&set->lock); | ||
1058 | mtype_expire(set, h); | ||
1059 | spin_unlock_bh(&set->lock); | ||
1060 | } | ||
1061 | |||
1050 | rcu_read_lock_bh(); | 1062 | rcu_read_lock_bh(); |
1051 | t = rcu_dereference_bh_nfnl(h->table); | 1063 | t = rcu_dereference_bh_nfnl(h->table); |
1052 | memsize = mtype_ahash_memsize(h, t) + set->ext_size; | 1064 | memsize = mtype_ahash_memsize(h, t) + set->ext_size; |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index f393a7086025..af8345fc4fbd 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
429 | 429 | ||
430 | srchash = hash_by_src(net, | 430 | srchash = hash_by_src(net, |
431 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 431 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
432 | lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)]; | 432 | lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; |
433 | spin_lock_bh(lock); | 433 | spin_lock_bh(lock); |
434 | hlist_add_head_rcu(&ct->nat_bysource, | 434 | hlist_add_head_rcu(&ct->nat_bysource, |
435 | &nf_nat_bysource[srchash]); | 435 | &nf_nat_bysource[srchash]); |
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) | |||
532 | unsigned int h; | 532 | unsigned int h; |
533 | 533 | ||
534 | h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 534 | h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
535 | spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]); | 535 | spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); |
536 | hlist_del_rcu(&ct->nat_bysource); | 536 | hlist_del_rcu(&ct->nat_bysource); |
537 | spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]); | 537 | spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); |
538 | } | 538 | } |
539 | 539 | ||
540 | static int nf_nat_proto_clean(struct nf_conn *ct, void *data) | 540 | static int nf_nat_proto_clean(struct nf_conn *ct, void *data) |
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void) | |||
807 | 807 | ||
808 | /* Leave them the same for the moment. */ | 808 | /* Leave them the same for the moment. */ |
809 | nf_nat_htable_size = nf_conntrack_htable_size; | 809 | nf_nat_htable_size = nf_conntrack_htable_size; |
810 | if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks)) | 810 | if (nf_nat_htable_size < CONNTRACK_LOCKS) |
811 | nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks); | 811 | nf_nat_htable_size = CONNTRACK_LOCKS; |
812 | 812 | ||
813 | nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); | 813 | nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); |
814 | if (!nf_nat_bysource) | 814 | if (!nf_nat_bysource) |
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void) | |||
821 | return ret; | 821 | return ret; |
822 | } | 822 | } |
823 | 823 | ||
824 | for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++) | 824 | for (i = 0; i < CONNTRACK_LOCKS; i++) |
825 | spin_lock_init(&nf_nat_locks[i]); | 825 | spin_lock_init(&nf_nat_locks[i]); |
826 | 826 | ||
827 | nf_ct_helper_expectfn_register(&follow_master_nat); | 827 | nf_ct_helper_expectfn_register(&follow_master_nat); |