aboutsummaryrefslogtreecommitdiffstats
path: root/net/netfilter/ipvs/ip_vs_conn.c
diff options
context:
space:
mode:
authorChangli Gao <xiaosuo@gmail.com>2011-02-19 05:05:08 -0500
committerSimon Horman <horms@verge.net.au>2011-02-22 01:45:39 -0500
commit731109e78415b4cc6c2f8de6c11b37f0e40741f8 (patch)
tree1912a5dbb69a6baa98e81a6a1a1e62a20a58e30e /net/netfilter/ipvs/ip_vs_conn.c
parent41ac51eeda58a85b8a06d748cce7035cc77deebd (diff)
ipvs: use hlist instead of list
Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
Diffstat (limited to 'net/netfilter/ipvs/ip_vs_conn.c')
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c52
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 83233fe24a0..9c2a517b69c 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -59,7 +59,7 @@ static int ip_vs_conn_tab_mask __read_mostly;
59/* 59/*
60 * Connection hash table: for input and output packets lookups of IPVS 60 * Connection hash table: for input and output packets lookups of IPVS
61 */ 61 */
62static struct list_head *ip_vs_conn_tab __read_mostly; 62static struct hlist_head *ip_vs_conn_tab __read_mostly;
63 63
64/* SLAB cache for IPVS connections */ 64/* SLAB cache for IPVS connections */
65static struct kmem_cache *ip_vs_conn_cachep __read_mostly; 65static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
@@ -201,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
201 spin_lock(&cp->lock); 201 spin_lock(&cp->lock);
202 202
203 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { 203 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
204 list_add(&cp->c_list, &ip_vs_conn_tab[hash]); 204 hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
205 cp->flags |= IP_VS_CONN_F_HASHED; 205 cp->flags |= IP_VS_CONN_F_HASHED;
206 atomic_inc(&cp->refcnt); 206 atomic_inc(&cp->refcnt);
207 ret = 1; 207 ret = 1;
@@ -234,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
234 spin_lock(&cp->lock); 234 spin_lock(&cp->lock);
235 235
236 if (cp->flags & IP_VS_CONN_F_HASHED) { 236 if (cp->flags & IP_VS_CONN_F_HASHED) {
237 list_del(&cp->c_list); 237 hlist_del(&cp->c_list);
238 cp->flags &= ~IP_VS_CONN_F_HASHED; 238 cp->flags &= ~IP_VS_CONN_F_HASHED;
239 atomic_dec(&cp->refcnt); 239 atomic_dec(&cp->refcnt);
240 ret = 1; 240 ret = 1;
@@ -259,12 +259,13 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
259{ 259{
260 unsigned hash; 260 unsigned hash;
261 struct ip_vs_conn *cp; 261 struct ip_vs_conn *cp;
262 struct hlist_node *n;
262 263
263 hash = ip_vs_conn_hashkey_param(p, false); 264 hash = ip_vs_conn_hashkey_param(p, false);
264 265
265 ct_read_lock(hash); 266 ct_read_lock(hash);
266 267
267 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 268 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
268 if (cp->af == p->af && 269 if (cp->af == p->af &&
269 p->cport == cp->cport && p->vport == cp->vport && 270 p->cport == cp->cport && p->vport == cp->vport &&
270 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && 271 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -345,12 +346,13 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
345{ 346{
346 unsigned hash; 347 unsigned hash;
347 struct ip_vs_conn *cp; 348 struct ip_vs_conn *cp;
349 struct hlist_node *n;
348 350
349 hash = ip_vs_conn_hashkey_param(p, false); 351 hash = ip_vs_conn_hashkey_param(p, false);
350 352
351 ct_read_lock(hash); 353 ct_read_lock(hash);
352 354
353 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 355 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
354 if (!ip_vs_conn_net_eq(cp, p->net)) 356 if (!ip_vs_conn_net_eq(cp, p->net))
355 continue; 357 continue;
356 if (p->pe_data && p->pe->ct_match) { 358 if (p->pe_data && p->pe->ct_match) {
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
394{ 396{
395 unsigned hash; 397 unsigned hash;
396 struct ip_vs_conn *cp, *ret=NULL; 398 struct ip_vs_conn *cp, *ret=NULL;
399 struct hlist_node *n;
397 400
398 /* 401 /*
399 * Check for "full" addressed entries 402 * Check for "full" addressed entries
@@ -402,7 +405,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
402 405
403 ct_read_lock(hash); 406 ct_read_lock(hash);
404 407
405 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 408 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
406 if (cp->af == p->af && 409 if (cp->af == p->af &&
407 p->vport == cp->cport && p->cport == cp->dport && 410 p->vport == cp->cport && p->cport == cp->dport &&
408 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && 411 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -818,7 +821,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
818 return NULL; 821 return NULL;
819 } 822 }
820 823
821 INIT_LIST_HEAD(&cp->c_list); 824 INIT_HLIST_NODE(&cp->c_list);
822 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); 825 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
823 ip_vs_conn_net_set(cp, p->net); 826 ip_vs_conn_net_set(cp, p->net);
824 cp->af = p->af; 827 cp->af = p->af;
@@ -894,8 +897,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
894 */ 897 */
895#ifdef CONFIG_PROC_FS 898#ifdef CONFIG_PROC_FS
896struct ip_vs_iter_state { 899struct ip_vs_iter_state {
897 struct seq_net_private p; 900 struct seq_net_private p;
898 struct list_head *l; 901 struct hlist_head *l;
899}; 902};
900 903
901static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) 904static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
@@ -903,13 +906,14 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
903 int idx; 906 int idx;
904 struct ip_vs_conn *cp; 907 struct ip_vs_conn *cp;
905 struct ip_vs_iter_state *iter = seq->private; 908 struct ip_vs_iter_state *iter = seq->private;
909 struct hlist_node *n;
906 910
907 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 911 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
908 ct_read_lock_bh(idx); 912 ct_read_lock_bh(idx);
909 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 913 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
910 if (pos-- == 0) { 914 if (pos-- == 0) {
911 iter->l = &ip_vs_conn_tab[idx]; 915 iter->l = &ip_vs_conn_tab[idx];
912 return cp; 916 return cp;
913 } 917 }
914 } 918 }
915 ct_read_unlock_bh(idx); 919 ct_read_unlock_bh(idx);
@@ -930,7 +934,8 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
930{ 934{
931 struct ip_vs_conn *cp = v; 935 struct ip_vs_conn *cp = v;
932 struct ip_vs_iter_state *iter = seq->private; 936 struct ip_vs_iter_state *iter = seq->private;
933 struct list_head *e, *l = iter->l; 937 struct hlist_node *e;
938 struct hlist_head *l = iter->l;
934 int idx; 939 int idx;
935 940
936 ++*pos; 941 ++*pos;
@@ -938,15 +943,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
938 return ip_vs_conn_array(seq, 0); 943 return ip_vs_conn_array(seq, 0);
939 944
940 /* more on same hash chain? */ 945 /* more on same hash chain? */
941 if ((e = cp->c_list.next) != l) 946 if ((e = cp->c_list.next))
942 return list_entry(e, struct ip_vs_conn, c_list); 947 return hlist_entry(e, struct ip_vs_conn, c_list);
943 948
944 idx = l - ip_vs_conn_tab; 949 idx = l - ip_vs_conn_tab;
945 ct_read_unlock_bh(idx); 950 ct_read_unlock_bh(idx);
946 951
947 while (++idx < ip_vs_conn_tab_size) { 952 while (++idx < ip_vs_conn_tab_size) {
948 ct_read_lock_bh(idx); 953 ct_read_lock_bh(idx);
949 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 954 hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
950 iter->l = &ip_vs_conn_tab[idx]; 955 iter->l = &ip_vs_conn_tab[idx];
951 return cp; 956 return cp;
952 } 957 }
@@ -959,7 +964,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
959static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v) 964static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
960{ 965{
961 struct ip_vs_iter_state *iter = seq->private; 966 struct ip_vs_iter_state *iter = seq->private;
962 struct list_head *l = iter->l; 967 struct hlist_head *l = iter->l;
963 968
964 if (l) 969 if (l)
965 ct_read_unlock_bh(l - ip_vs_conn_tab); 970 ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -1148,13 +1153,14 @@ void ip_vs_random_dropentry(struct net *net)
1148 */ 1153 */
1149 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { 1154 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1150 unsigned hash = net_random() & ip_vs_conn_tab_mask; 1155 unsigned hash = net_random() & ip_vs_conn_tab_mask;
1156 struct hlist_node *n;
1151 1157
1152 /* 1158 /*
1153 * Lock is actually needed in this loop. 1159 * Lock is actually needed in this loop.
1154 */ 1160 */
1155 ct_write_lock_bh(hash); 1161 ct_write_lock_bh(hash);
1156 1162
1157 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 1163 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
1158 if (cp->flags & IP_VS_CONN_F_TEMPLATE) 1164 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
1159 /* connection template */ 1165 /* connection template */
1160 continue; 1166 continue;
@@ -1202,12 +1208,14 @@ static void ip_vs_conn_flush(struct net *net)
1202 1208
1203flush_again: 1209flush_again:
1204 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 1210 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1211 struct hlist_node *n;
1212
1205 /* 1213 /*
1206 * Lock is actually needed in this loop. 1214 * Lock is actually needed in this loop.
1207 */ 1215 */
1208 ct_write_lock_bh(idx); 1216 ct_write_lock_bh(idx);
1209 1217
1210 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 1218 hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
1211 if (!ip_vs_conn_net_eq(cp, net)) 1219 if (!ip_vs_conn_net_eq(cp, net))
1212 continue; 1220 continue;
1213 IP_VS_DBG(4, "del connection\n"); 1221 IP_VS_DBG(4, "del connection\n");
@@ -1265,8 +1273,7 @@ int __init ip_vs_conn_init(void)
1265 /* 1273 /*
1266 * Allocate the connection hash table and initialize its list heads 1274 * Allocate the connection hash table and initialize its list heads
1267 */ 1275 */
1268 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * 1276 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
1269 sizeof(struct list_head));
1270 if (!ip_vs_conn_tab) 1277 if (!ip_vs_conn_tab)
1271 return -ENOMEM; 1278 return -ENOMEM;
1272 1279
@@ -1286,9 +1293,8 @@ int __init ip_vs_conn_init(void)
1286 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", 1293 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1287 sizeof(struct ip_vs_conn)); 1294 sizeof(struct ip_vs_conn));
1288 1295
1289 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 1296 for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
1290 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); 1297 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
1291 }
1292 1298
1293 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) { 1299 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
1294 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); 1300 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);