author	Jesper Dangaard Brouer <brouer@redhat.com>	2014-03-03 08:45:20 -0500
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2014-03-07 05:40:38 -0500
commit	b7779d06f9950e14a008a2de970b44233fe49c86 (patch)
tree	2044ec0cd420ca28f764806f41055e0c6ab33d99
parent	b476b72a0f8514a5a4c561bab731ddd506a284e7 (diff)
netfilter: conntrack: spinlock per cpu to protect special lists.
One spinlock per cpu to protect dying/unconfirmed/template special lists.
(These lists are now per cpu, a bit like the untracked ct)
Add a @cpu field to nf_conn, to make sure we hold the appropriate
spinlock at removal time.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
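The locking pattern the message describes (shard one global lock into one lock
per cpu, and record the owning cpu in each object so the remover knows which
lock to take) can be sketched outside the kernel. Below is a minimal,
self-contained userspace analogue in plain C with pthreads; it is not the
kernel code, and the names ct_pcpu, nf_conn_like, NCPU and the singly linked
list are illustrative stand-ins for the kernel's per-cpu hlist_nulls lists.

/* percpu_list_sketch.c - illustrative only, not the kernel code.
 * Build: cc percpu_list_sketch.c -o sketch -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define NCPU 4	/* stand-in for nr_cpu_ids */

struct nf_conn_like {
	unsigned int cpu;		/* which per-"cpu" list holds us */
	struct nf_conn_like *next;
};

struct ct_pcpu {
	pthread_spinlock_t lock;	/* one lock per "cpu", not one global lock */
	struct nf_conn_like *unconfirmed;
};

static struct ct_pcpu pcpu_lists[NCPU];

static void add_to_unconfirmed(struct nf_conn_like *ct, unsigned int cpu)
{
	struct ct_pcpu *pcpu = &pcpu_lists[cpu];

	ct->cpu = cpu;			/* remember the owner, like nf_conn.cpu */
	pthread_spin_lock(&pcpu->lock);
	ct->next = pcpu->unconfirmed;
	pcpu->unconfirmed = ct;
	pthread_spin_unlock(&pcpu->lock);
}

static void del_from_unconfirmed(struct nf_conn_like *ct)
{
	/* ct->cpu tells the remover which per-cpu lock protects its list */
	struct ct_pcpu *pcpu = &pcpu_lists[ct->cpu];
	struct nf_conn_like **pp;

	pthread_spin_lock(&pcpu->lock);
	for (pp = &pcpu->unconfirmed; *pp; pp = &(*pp)->next) {
		if (*pp == ct) {
			*pp = ct->next;
			break;
		}
	}
	pthread_spin_unlock(&pcpu->lock);
}

int main(void)
{
	struct nf_conn_like ct = { 0, NULL };
	int i;

	for (i = 0; i < NCPU; i++)
		pthread_spin_init(&pcpu_lists[i].lock, PTHREAD_PROCESS_PRIVATE);

	add_to_unconfirmed(&ct, 2);	/* pretend the insert ran on cpu 2 */
	del_from_unconfirmed(&ct);	/* finds the matching lock via ct.cpu */
	printf("ok\n");
	return 0;
}

The point is the same as in the patch: the inserter records which per-cpu list
it used, so removal can take exactly that one lock instead of a global one.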
-rw-r--r--	include/net/netfilter/nf_conntrack.h	3
-rw-r--r--	include/net/netns/conntrack.h	11
-rw-r--r--	net/netfilter/nf_conntrack_core.c	141
-rw-r--r--	net/netfilter/nf_conntrack_helper.c	11
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	81
5 files changed, 168 insertions(+), 79 deletions(-)
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e10d1faa6d09..37252f71a380 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -82,7 +82,8 @@ struct nf_conn {
 	 */
 	struct nf_conntrack ct_general;
 
 	spinlock_t	lock;
+	u16		cpu;
 
 	/* XXX should I move this to the tail ? - Y.K */
 	/* These are my tuples; original and reply */
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index fbcc7fa536dc..c6a8994e9922 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -62,6 +62,13 @@ struct nf_ip_net {
 #endif
 };
 
+struct ct_pcpu {
+	spinlock_t		lock;
+	struct hlist_nulls_head unconfirmed;
+	struct hlist_nulls_head dying;
+	struct hlist_nulls_head tmpl;
+};
+
 struct netns_ct {
 	atomic_t		count;
 	unsigned int		expect_count;
@@ -86,9 +93,7 @@ struct netns_ct {
 	struct kmem_cache	*nf_conntrack_cachep;
 	struct hlist_nulls_head	*hash;
 	struct hlist_head	*expect_hash;
-	struct hlist_nulls_head	unconfirmed;
-	struct hlist_nulls_head	dying;
-	struct hlist_nulls_head tmpl;
+	struct ct_pcpu __percpu *pcpu_lists;
 	struct ip_conntrack_stat __percpu *stat;
 	struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 	struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 965693eb1f0e..289b27901d8c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -192,6 +192,50 @@ clean_from_lists(struct nf_conn *ct)
 	nf_ct_remove_expectations(ct);
 }
 
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_dying_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) dying list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->dying);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) unconfirmed list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->unconfirmed);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* We overload first tuple to link into unconfirmed or dying list.*/
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&pcpu->lock);
+}
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -220,9 +264,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	 * too. */
 	nf_ct_remove_expectations(ct);
 
-	/* We overload first tuple to link into unconfirmed or dying list.*/
-	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
-	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
 	NF_CT_STAT_INC(net, delete);
 	spin_unlock_bh(&nf_conntrack_lock);
@@ -244,9 +286,7 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 	 * Otherwise we can get spurious warnings. */
 	NF_CT_STAT_INC(net, delete_list);
 	clean_from_lists(ct);
-	/* add this conntrack to the dying list */
-	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			     &net->ct.dying);
+	nf_ct_add_to_dying_list(ct);
 	spin_unlock_bh(&nf_conntrack_lock);
 }
 
@@ -467,15 +507,22 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 /* deletion from this larval template list happens via nf_ct_put() */
 void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
 {
+	struct ct_pcpu *pcpu;
+
 	__set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
 	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
 	nf_conntrack_get(&tmpl->ct_general);
 
-	spin_lock_bh(&nf_conntrack_lock);
+	/* add this conntrack to the (per cpu) tmpl list */
+	local_bh_disable();
+	tmpl->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
+
+	spin_lock(&pcpu->lock);
 	/* Overload tuple linked list to put us in template list. */
 	hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-				 &net->ct.tmpl);
-	spin_unlock_bh(&nf_conntrack_lock);
+				 &pcpu->tmpl);
+	spin_unlock_bh(&pcpu->lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
 
@@ -546,8 +593,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 			goto out;
 
-	/* Remove from unconfirmed list */
-	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
@@ -879,10 +925,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 
 	/* Now it is inserted into the unconfirmed list, bump refcount */
 	nf_conntrack_get(&ct->ct_general);
-
-	/* Overload tuple linked list to put us in unconfirmed list. */
-	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-		       &net->ct.unconfirmed);
+	nf_ct_add_to_unconfirmed_list(ct);
 
 	spin_unlock_bh(&nf_conntrack_lock);
 
@@ -1254,6 +1297,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
+	int cpu;
 
 	spin_lock_bh(&nf_conntrack_lock);
 	for (; *bucket < net->ct.htable_size; (*bucket)++) {
@@ -1265,12 +1309,19 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 				goto found;
 		}
 	}
-	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
-		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (iter(ct, data))
-			set_bit(IPS_DYING_BIT, &ct->status);
-	}
 	spin_unlock_bh(&nf_conntrack_lock);
+
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (iter(ct, data))
+				set_bit(IPS_DYING_BIT, &ct->status);
+		}
+		spin_unlock_bh(&pcpu->lock);
+	}
 	return NULL;
 found:
 	atomic_inc(&ct->ct_general.use);
@@ -1323,14 +1374,19 @@ static void nf_ct_release_dying_list(struct net *net)
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct hlist_nulls_node *n;
+	int cpu;
 
-	spin_lock_bh(&nf_conntrack_lock);
-	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
-		ct = nf_ct_tuplehash_to_ctrack(h);
-		/* never fails to remove them, no listeners at this point */
-		nf_ct_kill(ct);
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			/* never fails to remove them, no listeners at this point */
+			nf_ct_kill(ct);
+		}
+		spin_unlock_bh(&pcpu->lock);
 	}
-	spin_unlock_bh(&nf_conntrack_lock);
 }
 
 static int untrack_refs(void)
@@ -1417,6 +1473,7 @@ i_see_dead_people:
 		kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 		kfree(net->ct.slabname);
 		free_percpu(net->ct.stat);
+		free_percpu(net->ct.pcpu_lists);
 	}
 }
 
@@ -1629,37 +1686,43 @@ void nf_conntrack_init_end(void)
 
 int nf_conntrack_init_net(struct net *net)
 {
-	int ret;
+	int ret = -ENOMEM;
+	int cpu;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
-	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
-	INIT_HLIST_NULLS_HEAD(&net->ct.tmpl, TEMPLATE_NULLS_VAL);
-	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
-	if (!net->ct.stat) {
-		ret = -ENOMEM;
+
+	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
+	if (!net->ct.pcpu_lists)
 		goto err_stat;
+
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_init(&pcpu->lock);
+		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
+		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
+		INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
 	}
 
+	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
+	if (!net->ct.stat)
+		goto err_pcpu_lists;
+
 	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-	if (!net->ct.slabname) {
-		ret = -ENOMEM;
+	if (!net->ct.slabname)
 		goto err_slabname;
-	}
 
 	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
 							sizeof(struct nf_conn), 0,
 							SLAB_DESTROY_BY_RCU, NULL);
 	if (!net->ct.nf_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-		ret = -ENOMEM;
 		goto err_cache;
 	}
 
 	net->ct.htable_size = nf_conntrack_htable_size;
 	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
 	if (!net->ct.hash) {
-		ret = -ENOMEM;
 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
 		goto err_hash;
 	}
@@ -1701,6 +1764,8 @@ err_cache:
 	kfree(net->ct.slabname);
 err_slabname:
 	free_percpu(net->ct.stat);
+err_pcpu_lists:
+	free_percpu(net->ct.pcpu_lists);
 err_stat:
 	return ret;
 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 974a2a4adefa..27d9302c2191 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -396,6 +396,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
 	const struct hlist_node *next;
 	const struct hlist_nulls_node *nn;
 	unsigned int i;
+	int cpu;
 
 	/* Get rid of expectations */
 	for (i = 0; i < nf_ct_expect_hsize; i++) {
@@ -414,8 +415,14 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
 	}
 
 	/* Get rid of expecteds, set helpers to NULL. */
-	hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
-		unhelp(h, me);
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
+			unhelp(h, me);
+		spin_unlock_bh(&pcpu->lock);
+	}
 	for (i = 0; i < net->ct.htable_size; i++) {
 		hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
 			unhelp(h, me);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 47e9369997ef..4ac8ce68bc16 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1137,50 +1137,65 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 }
 
 static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
-		    struct hlist_nulls_head *list)
+ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-	struct nf_conn *ct, *last;
+	struct nf_conn *ct, *last = NULL;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 	int res;
+	int cpu;
+	struct hlist_nulls_head *list;
+	struct net *net = sock_net(skb->sk);
 
 	if (cb->args[2])
 		return 0;
 
-	spin_lock_bh(&nf_conntrack_lock);
-	last = (struct nf_conn *)cb->args[1];
-restart:
-	hlist_nulls_for_each_entry(h, n, list, hnnode) {
-		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (l3proto && nf_ct_l3num(ct) != l3proto)
-			continue;
-		if (cb->args[1]) {
-			if (ct != last)
-				continue;
-			cb->args[1] = 0;
-		}
-		rcu_read_lock();
-		res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
-					  cb->nlh->nlmsg_seq,
-					  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
-					  ct);
-		rcu_read_unlock();
-		if (res < 0) {
-			nf_conntrack_get(&ct->ct_general);
-			cb->args[1] = (unsigned long)ct;
-			goto out;
+	if (cb->args[0] == nr_cpu_ids)
+		return 0;
+
+	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+		struct ct_pcpu *pcpu;
+
+		if (!cpu_possible(cpu))
+			continue;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+		spin_lock_bh(&pcpu->lock);
+		last = (struct nf_conn *)cb->args[1];
+		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
+restart:
+		hlist_nulls_for_each_entry(h, n, list, hnnode) {
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (l3proto && nf_ct_l3num(ct) != l3proto)
+				continue;
+			if (cb->args[1]) {
+				if (ct != last)
+					continue;
+				cb->args[1] = 0;
+			}
+			rcu_read_lock();
+			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
+						  cb->nlh->nlmsg_seq,
+						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+						  ct);
+			rcu_read_unlock();
+			if (res < 0) {
+				nf_conntrack_get(&ct->ct_general);
+				cb->args[1] = (unsigned long)ct;
+				spin_unlock_bh(&pcpu->lock);
+				goto out;
+			}
 		}
+		if (cb->args[1]) {
+			cb->args[1] = 0;
+			goto restart;
+		} else
+			cb->args[2] = 1;
+		spin_unlock_bh(&pcpu->lock);
 	}
-	if (cb->args[1]) {
-		cb->args[1] = 0;
-		goto restart;
-	} else
-		cb->args[2] = 1;
 out:
-	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
@@ -1190,9 +1205,7 @@ out:
 static int
 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct net *net = sock_net(skb->sk);
-
-	return ctnetlink_dump_list(skb, cb, &net->ct.dying);
+	return ctnetlink_dump_list(skb, cb, true);
 }
 
 static int
@@ -1214,9 +1227,7 @@ ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
 static int
 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct net *net = sock_net(skb->sk);
-
-	return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
+	return ctnetlink_dump_list(skb, cb, false);
 }
 
 static int