author     David S. Miller <davem@davemloft.net>  2017-09-08 14:35:55 -0400
committer  David S. Miller <davem@davemloft.net>  2017-09-08 14:35:55 -0400
commit     108074611015bccfaf9ef50710edfd6929e55cd3
tree       b2730fecfa212ffd14fc4f5526bb95fdb166b43f
parent     91aac5637fc0f1db54edcccfc96a00879ec86411
parent     90c4ae4e2c1da9f1eaf846136861af43d4c1ff34

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree,
they are:

1) Fix SCTP connection setup when IPVS module is loaded and any scheduler
   is registered, from Xin Long.

2) Don't create a SCTP connection from SCTP ABORT packets, also from
   Xin Long.

3) WARN_ON() and drop packet, instead of BUG_ON() races when calling
   nf_nat_setup_info(). This is specifically a longstanding problem
   when br_netfilter with conntrack support is in place, patch from
   Florian Westphal.

4) Avoid softlock splats via iptables-restore, also from Florian.

5) Revert NAT hashtable conversion to rhashtable, semantics of rhlist
   are different from our simple NAT hashtable, this has been causing
   problems in the recent Linux kernel releases. From Florian.

6) Add per-bucket spinlock for NAT hashtable, so at least we restore
   one of the benefits we got from the previous rhashtable conversion.

7) Fix incorrect hashtable size in memory allocation in xt_hashlimit,
   from Zhizhou Tian.

8) Fix build/link problems with hashlimit and 32-bit arches, to address
   recent fallout from a new hashlimit mode, from Vishwanath Pai.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
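Fix 3 is worth a closer look before the diff: rather than halting the machine
with BUG_ON() when two packets race into nf_nat_setup_info() for the same
conntrack, the kernel now warns and drops the offending packet. A minimal
sketch of that WARN-and-drop pattern (setup_nat() is a hypothetical stand-in,
not the real function):

	/* Sketch: survive the race, lose one packet, keep the machine up. */
	static unsigned int setup_nat(struct nf_conn *ct, enum nf_nat_manip_type m)
	{
		if (WARN_ON(nf_nat_initialized(ct, m)))
			return NF_DROP;	/* already initialized: racing packet */

		/* ... compute and record the NAT binding ... */
		return NF_ACCEPT;
	}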
-rw-r--r--  include/net/netfilter/nf_conntrack.h   |   3
-rw-r--r--  include/net/netfilter/nf_nat.h          |   1
-rw-r--r--  net/ipv4/netfilter/arp_tables.c         |   1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c          |   1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c         |   1
-rw-r--r--  net/netfilter/core.c                    |   2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c   |   8
-rw-r--r--  net/netfilter/nf_nat_core.c             | 146
-rw-r--r--  net/netfilter/xt_hashlimit.c            |  16
9 files changed, 88 insertions(+), 91 deletions(-)
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index fdc9c64a1c94..8f3bd30511de 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -17,7 +17,6 @@
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <linux/rhashtable.h>
 
 #include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/netfilter/nf_conntrack_dccp.h>
@@ -77,7 +76,7 @@ struct nf_conn {
 	possible_net_t ct_net;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
-	struct rhlist_head nat_bysource;
+	struct hlist_node nat_bysource;
 #endif
 	/* all members below initialized via memset */
 	u8 __nfct_init_offset[0];
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 05c82a1a4267..b71701302e61 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,6 +1,5 @@
 #ifndef _NF_NAT_H
 #define _NF_NAT_H
-#include <linux/rhashtable.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/nf_nat.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e04457198f93..9e2770fd00be 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -629,6 +629,7 @@ static void get_counters(const struct xt_table_info *t,
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
+			cond_resched();
 		}
 	}
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 576cba2b57e9..39286e543ee6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -776,6 +776,7 @@ get_counters(const struct xt_table_info *t,
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
+			cond_resched();
 		}
 	}
 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 54b1e75eded1..01bd3ee5ebc6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -795,6 +795,7 @@ get_counters(const struct xt_table_info *t,
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
+			cond_resched();
 		}
 	}
 }
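All three get_counters() hunks above are the same fix: dropping a
cond_resched() into the per-rule loop so that restoring a huge ruleset via
iptables-restore cannot hog the CPU long enough to trigger soft-lockup
splats. The pattern in isolation, as a hedged sketch (nr_rules and
count_rule() are illustrative stand-ins for the real xt_entry walk):

	/* Sketch of the voluntary-preemption pattern used above. */
	static void count_all_rules(unsigned int nr_rules)
	{
		unsigned int i;

		for (i = 0; i < nr_rules; i++) {
			count_rule(i);		/* per-rule counter work */
			cond_resched();		/* yield if rescheduling is due */
		}
	}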
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 04fe25abc5f6..52cd2901a097 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -215,7 +215,7 @@ static void *__nf_hook_entries_try_shrink(struct nf_hook_entries __rcu **pp)
 	if (skip == hook_entries)
 		goto out_assign;
 
-	if (WARN_ON(skip == 0))
+	if (skip == 0)
 		return NULL;
 
 	hook_entries -= skip;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index e1efa446b305..57c8ee66491e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -24,9 +24,13 @@ sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
 		if (sh) {
 			sch = skb_header_pointer(skb, iph->len + sizeof(_sctph),
 						 sizeof(_schunkh), &_schunkh);
-			if (sch && (sch->type == SCTP_CID_INIT ||
-				    sysctl_sloppy_sctp(ipvs)))
+			if (sch) {
+				if (sch->type == SCTP_CID_ABORT ||
+				    !(sysctl_sloppy_sctp(ipvs) ||
+				      sch->type == SCTP_CID_INIT))
+					return 1;
 				ports = &sh->source;
+			}
 		}
 	} else {
 		ports = skb_header_pointer(
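The patched control flow above encodes a small predicate: an ABORT chunk must
never create IPVS state, INIT always may, and anything else may only when
sloppy_sctp is enabled. Restated as a standalone boolean helper (the wrapper
function is illustrative, not kernel code; the chunk types are the real
SCTP_CID_* constants):

	/* Sketch: which SCTP chunks are allowed to create a connection? */
	static bool sctp_may_schedule(int chunk_type, bool sloppy_sctp)
	{
		if (chunk_type == SCTP_CID_ABORT)
			return false;	/* never build state from a teardown */
		if (chunk_type == SCTP_CID_INIT)
			return true;	/* normal connection setup */
		return sloppy_sctp;	/* mid-stream pickup only if sloppy */
	}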
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 40573aa6c133..f393a7086025 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -30,19 +30,17 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_nat.h>
 
+static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];
+
 static DEFINE_MUTEX(nf_nat_proto_mutex);
 static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
 						__read_mostly;
 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
 						__read_mostly;
 
-struct nf_nat_conn_key {
-	const struct net *net;
-	const struct nf_conntrack_tuple *tuple;
-	const struct nf_conntrack_zone *zone;
-};
-
-static struct rhltable nf_nat_bysource_table;
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 EXPORT_SYMBOL(nf_xfrm_me_harder);
 #endif /* CONFIG_XFRM */
 
-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
+/* We keep an extra hash for each conntrack, for fast searching. */
+static unsigned int
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
-	const struct nf_conntrack_tuple *t;
-	const struct nf_conn *ct = data;
+	unsigned int hash;
+
+	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
 
-	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 	/* Original src, to ensure we map it consistently if poss. */
+	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
 
-	seed ^= net_hash_mix(nf_ct_net(ct));
-	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
-		      t->dst.protonum ^ seed);
+	return reciprocal_scale(hash, nf_nat_htable_size);
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct,
 		t->src.u.all == tuple->src.u.all);
 }
 
-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
-			       const void *obj)
-{
-	const struct nf_nat_conn_key *key = arg->key;
-	const struct nf_conn *ct = obj;
-
-	if (!same_src(ct, key->tuple) ||
-	    !net_eq(nf_ct_net(ct), key->net) ||
-	    !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
-		return 1;
-
-	return 0;
-}
-
-static struct rhashtable_params nf_nat_bysource_params = {
-	.head_offset = offsetof(struct nf_conn, nat_bysource),
-	.obj_hashfn = nf_nat_bysource_hash,
-	.obj_cmpfn = nf_nat_bysource_cmp,
-	.nelem_hint = 256,
-	.min_size = 1024,
-};
-
 /* Only called for SRC manip */
 static int
 find_appropriate_src(struct net *net,
@@ -216,26 +194,22 @@ find_appropriate_src(struct net *net,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn *ct;
-	struct nf_nat_conn_key key = {
-		.net = net,
-		.tuple = tuple,
-		.zone = zone
-	};
-	struct rhlist_head *hl, *h;
-
-	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
-			     nf_nat_bysource_params);
-
-	rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
-		nf_ct_invert_tuplepr(result,
-				     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-		result->dst = tuple->dst;
 
-		if (in_range(l3proto, l4proto, result, range))
-			return 1;
+	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
+		if (same_src(ct, tuple) &&
+		    net_eq(net, nf_ct_net(ct)) &&
+		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
+			/* Copy source part from reply tuple. */
+			nf_ct_invert_tuplepr(result,
+					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+			result->dst = tuple->dst;
+
+			if (in_range(l3proto, l4proto, result, range))
+				return 1;
+		}
 	}
-
 	return 0;
 }
 
@@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype)
 {
+	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_tuple curr_tuple, new_tuple;
 
 	/* Can't setup nat info for confirmed ct. */
@@ -416,7 +391,9 @@ nf_nat_setup_info(struct nf_conn *ct,
 
 	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
 		maniptype != NF_NAT_MANIP_DST);
-	BUG_ON(nf_nat_initialized(ct, maniptype));
+
+	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
+		return NF_DROP;
 
 	/* What we've got will look like inverse of reply. Normally
 	 * this is what is in the conntrack, except for prior
@@ -447,19 +424,16 @@ nf_nat_setup_info(struct nf_conn *ct,
 	}
 
 	if (maniptype == NF_NAT_MANIP_SRC) {
-		struct nf_nat_conn_key key = {
-			.net = nf_ct_net(ct),
-			.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-			.zone = nf_ct_zone(ct),
-		};
-		int err;
-
-		err = rhltable_insert_key(&nf_nat_bysource_table,
-					  &key,
-					  &ct->nat_bysource,
-					  nf_nat_bysource_params);
-		if (err)
-			return NF_DROP;
+		unsigned int srchash;
+		spinlock_t *lock;
+
+		srchash = hash_by_src(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
+		spin_lock_bh(lock);
+		hlist_add_head_rcu(&ct->nat_bysource,
+				   &nf_nat_bysource[srchash]);
+		spin_unlock_bh(lock);
 	}
 
 	/* It's done. */
@@ -553,6 +527,16 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
+{
+	unsigned int h;
+
+	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+	hlist_del_rcu(&ct->nat_bysource);
+	spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+}
+
 static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 {
 	if (nf_nat_proto_remove(ct, data))
@@ -568,8 +552,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * will delete entry from already-freed table.
 	 */
 	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
-	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
-			nf_nat_bysource_params);
+	__nf_nat_cleanup_conntrack(ct);
 
 	/* don't delete conntrack. Although that would make things a lot
 	 * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +681,7 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
 static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
 {
 	if (ct->status & IPS_SRC_NAT_DONE)
-		rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
-				nf_nat_bysource_params);
+		__nf_nat_cleanup_conntrack(ct);
 }
 
 static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -821,19 +803,27 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
 
 static int __init nf_nat_init(void)
 {
-	int ret;
+	int ret, i;
 
-	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
-	if (ret)
-		return ret;
+	/* Leave them the same for the moment. */
+	nf_nat_htable_size = nf_conntrack_htable_size;
+	if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
+		nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
+
+	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+	if (!nf_nat_bysource)
+		return -ENOMEM;
 
 	ret = nf_ct_extend_register(&nat_extend);
 	if (ret < 0) {
-		rhltable_destroy(&nf_nat_bysource_table);
+		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
 		return ret;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
+		spin_lock_init(&nf_nat_locks[i]);
+
 	nf_ct_helper_expectfn_register(&follow_master_nat);
 
 	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
@@ -863,8 +853,8 @@ static void __exit nf_nat_cleanup(void)
 
 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		kfree(nf_nat_l4protos[i]);
-
-	rhltable_destroy(&nf_nat_bysource_table);
+	synchronize_net();
+	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 }
 
 MODULE_LICENSE("GPL");
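Fixes 5 and 6 meet in this file: the bysource table reverts from rhltable to
a plain hlist array, and a small fixed pool of spinlocks (keyed by the same
source hash) keeps writers from serializing on one global lock. The
keyed-lock shape in isolation, as a sketch with arbitrary sizes (the kernel
derives the bucket count from nf_conntrack_htable_size and uses
CONNTRACK_LOCKS locks):

	#define N_BUCKETS	1024	/* illustrative; kernel sizes at init */
	#define N_LOCKS		32	/* illustrative; kernel uses CONNTRACK_LOCKS */

	static struct hlist_head buckets[N_BUCKETS];
	static spinlock_t locks[N_LOCKS];

	static void keyed_insert(struct hlist_node *node, unsigned int hash)
	{
		/* One lock guards every bucket that maps onto it. */
		spinlock_t *lock = &locks[hash % N_LOCKS];

		spin_lock_bh(lock);
		hlist_add_head_rcu(node, &buckets[hash % N_BUCKETS]);
		spin_unlock_bh(lock);
	}

Readers stay lock-free: lookups walk the bucket under RCU, exactly as
find_appropriate_src() does above.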
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 10d48234f5f4..5da8746f7b88 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -35,6 +35,7 @@
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <linux/netfilter/xt_hashlimit.h>
 #include <linux/mutex.h>
+#include <linux/kernel.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -279,7 +280,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 		size = cfg->size;
 	} else {
 		size = (totalram_pages << PAGE_SHIFT) / 16384 /
-		       sizeof(struct list_head);
+		       sizeof(struct hlist_head);
 		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
 			size = 8192;
 		if (size < 16)
@@ -287,7 +288,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 	}
 	/* FIXME: don't use vmalloc() here or anywhere else -HW */
 	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
-			sizeof(struct list_head) * size);
+			sizeof(struct hlist_head) * size);
 	if (hinfo == NULL)
 		return -ENOMEM;
 	*out_hinfo = hinfo;
@@ -527,12 +528,12 @@ static u64 user2rate(u64 user)
 	}
 }
 
-static u64 user2rate_bytes(u64 user)
+static u64 user2rate_bytes(u32 user)
 {
 	u64 r;
 
-	r = user ? 0xFFFFFFFFULL / user : 0xFFFFFFFFULL;
-	r = (r - 1) << 4;
+	r = user ? U32_MAX / user : U32_MAX;
+	r = (r - 1) << XT_HASHLIMIT_BYTE_SHIFT;
 	return r;
 }
 
@@ -588,7 +589,8 @@ static void rateinfo_init(struct dsthash_ent *dh,
 	dh->rateinfo.prev_window = 0;
 	dh->rateinfo.current_rate = 0;
 	if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
-		dh->rateinfo.rate = user2rate_bytes(hinfo->cfg.avg);
+		dh->rateinfo.rate =
+			user2rate_bytes((u32)hinfo->cfg.avg);
 		if (hinfo->cfg.burst)
 			dh->rateinfo.burst =
 				hinfo->cfg.burst * dh->rateinfo.rate;
@@ -870,7 +872,7 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
 
 	/* Check for overflow. */
 	if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
-		if (cfg->avg == 0) {
+		if (cfg->avg == 0 || cfg->avg > U32_MAX) {
 			pr_info("hashlimit invalid rate\n");
 			return -ERANGE;
 		}