aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/inet_hashtables.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2015-05-26 10:55:34 -0400
committerDavid S. Miller <davem@davemloft.net>2015-05-26 19:48:46 -0400
commit095dc8e0c3686d586a01a50abc3e1bb9ac633054 (patch)
tree5b6b15dcfcc64e553af3587a83d70b5f859f8a45 /net/ipv4/inet_hashtables.c
parentf3903bcc0091df871ac64261f65ed2e4c3519d39 (diff)
tcp: fix/cleanup inet_ehash_locks_alloc()
If tcp ehash table is constrained to a very small number of buckets (eg boot parameter thash_entries=128), then we can crash if spinlock array has more entries. While we are at it, un-inline inet_ehash_locks_alloc() and make following changes : - Budget 2 cache lines per cpu worth of 'spinlocks' - Try to kmalloc() the array to avoid extra TLB pressure. (Most servers at Google allocate 8192 bytes for this hash table) - Get rid of various #ifdef Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--net/ipv4/inet_hashtables.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3766bddb3e8a..185efef0f125 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -18,6 +18,7 @@
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/wait.h> 20#include <linux/wait.h>
21#include <linux/vmalloc.h>
21 22
22#include <net/inet_connection_sock.h> 23#include <net/inet_connection_sock.h>
23#include <net/inet_hashtables.h> 24#include <net/inet_hashtables.h>
@@ -609,3 +610,33 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
609 } 610 }
610} 611}
611EXPORT_SYMBOL_GPL(inet_hashinfo_init); 612EXPORT_SYMBOL_GPL(inet_hashinfo_init);
613
614int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
615{
616 unsigned int i, nblocks = 1;
617
618 if (sizeof(spinlock_t) != 0) {
619 /* allocate 2 cache lines or at least one spinlock per cpu */
620 nblocks = max_t(unsigned int,
621 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
622 1);
623 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
624
625 /* no more locks than number of hash buckets */
626 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
627
628 hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
629 GFP_KERNEL | __GFP_NOWARN);
630 if (!hashinfo->ehash_locks)
631 hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
632
633 if (!hashinfo->ehash_locks)
634 return -ENOMEM;
635
636 for (i = 0; i < nblocks; i++)
637 spin_lock_init(&hashinfo->ehash_locks[i]);
638 }
639 hashinfo->ehash_locks_mask = nblocks - 1;
640 return 0;
641}
642EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);