author	Eric Dumazet <dada1@cosmosbay.com>	2008-11-26 00:17:14 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-26 00:17:14 -0500
commit	dd24c00191d5e4a1ae896aafe33c6b8095ab4bd1 (patch)
tree	e955c09e0b288e50c706b6ee409229d5a930c80c /net/dccp/proto.c
parent	1748376b6626acf59c24e9592ac67b3fe2a0e026 (diff)
net: Use a percpu_counter for orphan_count
Instead of using one atomic_t per protocol, use a percpu_counter
for "orphan_count", to reduce cache line contention on heavy duty
network servers.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
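For readers unfamiliar with the API, a minimal sketch of the percpu_counter lifecycle this patch adopts follows. The two-argument percpu_counter_init() matches the kernel API at the time of this commit (later kernels add a gfp_t argument); the example_* names are illustrative only and not part of the patch.

#include <linux/percpu_counter.h>

static struct percpu_counter example_orphan_count;

static int example_init(void)
{
	/* Allocates the per-CPU slots; can fail, so the result must be checked. */
	return percpu_counter_init(&example_orphan_count, 0);
}

static void example_orphan_one(void)
{
	/* Hot path: updates only this CPU's slot instead of a shared atomic,
	 * avoiding cache line bouncing between CPUs.
	 */
	percpu_counter_inc(&example_orphan_count);
}

static void example_cleanup(void)
{
	/* Frees the per-CPU storage allocated by percpu_counter_init(). */
	percpu_counter_destroy(&example_orphan_count);
}

This mirrors the three changes in the diff below: the counter declaration, the increment on the socket close path, and the init/destroy pair in dccp_init()'s setup and error unwind.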
Diffstat (limited to 'net/dccp/proto.c')
-rw-r--r--	net/dccp/proto.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ea85c423cdbd..db225f93cd5a 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -40,8 +40,7 @@ DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 EXPORT_SYMBOL_GPL(dccp_statistics);
 
-atomic_t dccp_orphan_count = ATOMIC_INIT(0);
-
+struct percpu_counter dccp_orphan_count;
 EXPORT_SYMBOL_GPL(dccp_orphan_count);
 
 struct inet_hashinfo dccp_hashinfo;
@@ -1000,7 +999,7 @@ adjudge_to_death:
 	state = sk->sk_state;
 	sock_hold(sk);
 	sock_orphan(sk);
-	atomic_inc(sk->sk_prot->orphan_count);
+	percpu_counter_inc(sk->sk_prot->orphan_count);
 
 	/*
 	 * It is the last release_sock in its life. It will remove backlog.
@@ -1064,18 +1063,21 @@ static int __init dccp_init(void)
 {
 	unsigned long goal;
 	int ehash_order, bhash_order, i;
-	int rc = -ENOBUFS;
+	int rc;
 
 	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
 		     FIELD_SIZEOF(struct sk_buff, cb));
-
+	rc = percpu_counter_init(&dccp_orphan_count, 0);
+	if (rc)
+		goto out;
+	rc = -ENOBUFS;
 	inet_hashinfo_init(&dccp_hashinfo);
 	dccp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("dccp_bind_bucket",
 				  sizeof(struct inet_bind_bucket), 0,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!dccp_hashinfo.bind_bucket_cachep)
-		goto out;
+		goto out_free_percpu;
 
 	/*
 	 * Size and allocate the main established and bind bucket
@@ -1168,6 +1170,8 @@ out_free_dccp_ehash:
 out_free_bind_bucket_cachep:
 	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
 	dccp_hashinfo.bind_bucket_cachep = NULL;
+out_free_percpu:
+	percpu_counter_destroy(&dccp_orphan_count);
 	goto out;
 }
 
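Not visible in this file-limited diff: code elsewhere in the tree that previously read orphan_count with atomic_read() has to switch to the percpu_counter read helpers. A hedged sketch of such a reader (the helper name is illustrative, not from this patch):

/* Illustrative reader, not part of this patch. */
static inline s64 example_orphan_load(struct percpu_counter *orphans)
{
	/* percpu_counter_read_positive() is a cheap, slightly approximate
	 * read of the global value; percpu_counter_sum_positive() walks
	 * every CPU for an exact total at higher cost.
	 */
	return percpu_counter_read_positive(orphans);
}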