about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2008-01-31 07:38:58 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-31 22:27:54 -0500
commitf8ba1affa18398610e765736153fff614309ccc8 (patch)
treeb2143ef65aa92e5995070824798b04d935044e24 /net
parent76507f69c44ed199a1a68086145398459e55835d (diff)
[NETFILTER]: nf_conntrack: switch rwlock to spinlock
With the RCU conversion only write_lock usages of nf_conntrack_lock are left (except one read_lock that should actually use write_lock in the H.323 helper). Switch to a spinlock. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/netfilter/nf_conntrack_core.c38
-rw-r--r--net/netfilter/nf_conntrack_expect.c12
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c22
5 files changed, 40 insertions, 40 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a54bfec61e79..f284dddfc899 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -40,7 +40,7 @@
40 40
41#define NF_CONNTRACK_VERSION "0.5.0" 41#define NF_CONNTRACK_VERSION "0.5.0"
42 42
43DEFINE_RWLOCK(nf_conntrack_lock); 43DEFINE_SPINLOCK(nf_conntrack_lock);
44EXPORT_SYMBOL_GPL(nf_conntrack_lock); 44EXPORT_SYMBOL_GPL(nf_conntrack_lock);
45 45
46/* nf_conntrack_standalone needs this */ 46/* nf_conntrack_standalone needs this */
@@ -199,7 +199,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
199 199
200 rcu_read_unlock(); 200 rcu_read_unlock();
201 201
202 write_lock_bh(&nf_conntrack_lock); 202 spin_lock_bh(&nf_conntrack_lock);
203 /* Expectations will have been removed in clean_from_lists, 203 /* Expectations will have been removed in clean_from_lists,
204 * except TFTP can create an expectation on the first packet, 204 * except TFTP can create an expectation on the first packet,
205 * before connection is in the list, so we need to clean here, 205 * before connection is in the list, so we need to clean here,
@@ -213,7 +213,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
213 } 213 }
214 214
215 NF_CT_STAT_INC(delete); 215 NF_CT_STAT_INC(delete);
216 write_unlock_bh(&nf_conntrack_lock); 216 spin_unlock_bh(&nf_conntrack_lock);
217 217
218 if (ct->master) 218 if (ct->master)
219 nf_ct_put(ct->master); 219 nf_ct_put(ct->master);
@@ -236,12 +236,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
236 rcu_read_unlock(); 236 rcu_read_unlock();
237 } 237 }
238 238
239 write_lock_bh(&nf_conntrack_lock); 239 spin_lock_bh(&nf_conntrack_lock);
240 /* Inside lock so preempt is disabled on module removal path. 240 /* Inside lock so preempt is disabled on module removal path.
241 * Otherwise we can get spurious warnings. */ 241 * Otherwise we can get spurious warnings. */
242 NF_CT_STAT_INC(delete_list); 242 NF_CT_STAT_INC(delete_list);
243 clean_from_lists(ct); 243 clean_from_lists(ct);
244 write_unlock_bh(&nf_conntrack_lock); 244 spin_unlock_bh(&nf_conntrack_lock);
245 nf_ct_put(ct); 245 nf_ct_put(ct);
246} 246}
247 247
@@ -303,9 +303,9 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
303 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 303 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
304 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 304 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
305 305
306 write_lock_bh(&nf_conntrack_lock); 306 spin_lock_bh(&nf_conntrack_lock);
307 __nf_conntrack_hash_insert(ct, hash, repl_hash); 307 __nf_conntrack_hash_insert(ct, hash, repl_hash);
308 write_unlock_bh(&nf_conntrack_lock); 308 spin_unlock_bh(&nf_conntrack_lock);
309} 309}
310EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); 310EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
311 311
@@ -342,7 +342,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
342 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 342 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
343 pr_debug("Confirming conntrack %p\n", ct); 343 pr_debug("Confirming conntrack %p\n", ct);
344 344
345 write_lock_bh(&nf_conntrack_lock); 345 spin_lock_bh(&nf_conntrack_lock);
346 346
347 /* See if there's one in the list already, including reverse: 347 /* See if there's one in the list already, including reverse:
348 NAT could have grabbed it without realizing, since we're 348 NAT could have grabbed it without realizing, since we're
@@ -368,7 +368,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
368 atomic_inc(&ct->ct_general.use); 368 atomic_inc(&ct->ct_general.use);
369 set_bit(IPS_CONFIRMED_BIT, &ct->status); 369 set_bit(IPS_CONFIRMED_BIT, &ct->status);
370 NF_CT_STAT_INC(insert); 370 NF_CT_STAT_INC(insert);
371 write_unlock_bh(&nf_conntrack_lock); 371 spin_unlock_bh(&nf_conntrack_lock);
372 help = nfct_help(ct); 372 help = nfct_help(ct);
373 if (help && help->helper) 373 if (help && help->helper)
374 nf_conntrack_event_cache(IPCT_HELPER, skb); 374 nf_conntrack_event_cache(IPCT_HELPER, skb);
@@ -383,7 +383,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
383 383
384out: 384out:
385 NF_CT_STAT_INC(insert_failed); 385 NF_CT_STAT_INC(insert_failed);
386 write_unlock_bh(&nf_conntrack_lock); 386 spin_unlock_bh(&nf_conntrack_lock);
387 return NF_DROP; 387 return NF_DROP;
388} 388}
389EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); 389EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
@@ -538,7 +538,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
538 return NULL; 538 return NULL;
539 } 539 }
540 540
541 write_lock_bh(&nf_conntrack_lock); 541 spin_lock_bh(&nf_conntrack_lock);
542 exp = nf_ct_find_expectation(tuple); 542 exp = nf_ct_find_expectation(tuple);
543 if (exp) { 543 if (exp) {
544 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 544 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
@@ -576,7 +576,7 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
576 hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 576 hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
577 &unconfirmed); 577 &unconfirmed);
578 578
579 write_unlock_bh(&nf_conntrack_lock); 579 spin_unlock_bh(&nf_conntrack_lock);
580 580
581 if (exp) { 581 if (exp) {
582 if (exp->expectfn) 582 if (exp->expectfn)
@@ -787,7 +787,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
787 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct); 787 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
788 NF_CT_ASSERT(skb); 788 NF_CT_ASSERT(skb);
789 789
790 write_lock_bh(&nf_conntrack_lock); 790 spin_lock_bh(&nf_conntrack_lock);
791 791
792 /* Only update if this is not a fixed timeout */ 792 /* Only update if this is not a fixed timeout */
793 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) 793 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
@@ -824,7 +824,7 @@ acct:
824 } 824 }
825#endif 825#endif
826 826
827 write_unlock_bh(&nf_conntrack_lock); 827 spin_unlock_bh(&nf_conntrack_lock);
828 828
829 /* must be unlocked when calling event cache */ 829 /* must be unlocked when calling event cache */
830 if (event) 830 if (event)
@@ -909,7 +909,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
909 struct nf_conn *ct; 909 struct nf_conn *ct;
910 struct hlist_node *n; 910 struct hlist_node *n;
911 911
912 write_lock_bh(&nf_conntrack_lock); 912 spin_lock_bh(&nf_conntrack_lock);
913 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 913 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
914 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) { 914 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
915 ct = nf_ct_tuplehash_to_ctrack(h); 915 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -922,11 +922,11 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
922 if (iter(ct, data)) 922 if (iter(ct, data))
923 set_bit(IPS_DYING_BIT, &ct->status); 923 set_bit(IPS_DYING_BIT, &ct->status);
924 } 924 }
925 write_unlock_bh(&nf_conntrack_lock); 925 spin_unlock_bh(&nf_conntrack_lock);
926 return NULL; 926 return NULL;
927found: 927found:
928 atomic_inc(&ct->ct_general.use); 928 atomic_inc(&ct->ct_general.use);
929 write_unlock_bh(&nf_conntrack_lock); 929 spin_unlock_bh(&nf_conntrack_lock);
930 return ct; 930 return ct;
931} 931}
932 932
@@ -1055,7 +1055,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1055 * created because of a false negative won't make it into the hash 1055 * created because of a false negative won't make it into the hash
1056 * though since that required taking the lock. 1056 * though since that required taking the lock.
1057 */ 1057 */
1058 write_lock_bh(&nf_conntrack_lock); 1058 spin_lock_bh(&nf_conntrack_lock);
1059 for (i = 0; i < nf_conntrack_htable_size; i++) { 1059 for (i = 0; i < nf_conntrack_htable_size; i++) {
1060 while (!hlist_empty(&nf_conntrack_hash[i])) { 1060 while (!hlist_empty(&nf_conntrack_hash[i])) {
1061 h = hlist_entry(nf_conntrack_hash[i].first, 1061 h = hlist_entry(nf_conntrack_hash[i].first,
@@ -1073,7 +1073,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1073 nf_conntrack_vmalloc = vmalloced; 1073 nf_conntrack_vmalloc = vmalloced;
1074 nf_conntrack_hash = hash; 1074 nf_conntrack_hash = hash;
1075 nf_conntrack_hash_rnd = rnd; 1075 nf_conntrack_hash_rnd = rnd;
1076 write_unlock_bh(&nf_conntrack_lock); 1076 spin_unlock_bh(&nf_conntrack_lock);
1077 1077
1078 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1078 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1079 return 0; 1079 return 0;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index a5c8ef01f925..e06bf0028bb1 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -65,9 +65,9 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
65{ 65{
66 struct nf_conntrack_expect *exp = (void *)ul_expect; 66 struct nf_conntrack_expect *exp = (void *)ul_expect;
67 67
68 write_lock_bh(&nf_conntrack_lock); 68 spin_lock_bh(&nf_conntrack_lock);
69 nf_ct_unlink_expect(exp); 69 nf_ct_unlink_expect(exp);
70 write_unlock_bh(&nf_conntrack_lock); 70 spin_unlock_bh(&nf_conntrack_lock);
71 nf_ct_expect_put(exp); 71 nf_ct_expect_put(exp);
72} 72}
73 73
@@ -201,12 +201,12 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
201/* Generally a bad idea to call this: could have matched already. */ 201/* Generally a bad idea to call this: could have matched already. */
202void nf_ct_unexpect_related(struct nf_conntrack_expect *exp) 202void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
203{ 203{
204 write_lock_bh(&nf_conntrack_lock); 204 spin_lock_bh(&nf_conntrack_lock);
205 if (del_timer(&exp->timeout)) { 205 if (del_timer(&exp->timeout)) {
206 nf_ct_unlink_expect(exp); 206 nf_ct_unlink_expect(exp);
207 nf_ct_expect_put(exp); 207 nf_ct_expect_put(exp);
208 } 208 }
209 write_unlock_bh(&nf_conntrack_lock); 209 spin_unlock_bh(&nf_conntrack_lock);
210} 210}
211EXPORT_SYMBOL_GPL(nf_ct_unexpect_related); 211EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
212 212
@@ -355,7 +355,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
355 355
356 NF_CT_ASSERT(master_help); 356 NF_CT_ASSERT(master_help);
357 357
358 write_lock_bh(&nf_conntrack_lock); 358 spin_lock_bh(&nf_conntrack_lock);
359 if (!master_help->helper) { 359 if (!master_help->helper) {
360 ret = -ESHUTDOWN; 360 ret = -ESHUTDOWN;
361 goto out; 361 goto out;
@@ -390,7 +390,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
390 nf_ct_expect_event(IPEXP_NEW, expect); 390 nf_ct_expect_event(IPEXP_NEW, expect);
391 ret = 0; 391 ret = 0;
392out: 392out:
393 write_unlock_bh(&nf_conntrack_lock); 393 spin_unlock_bh(&nf_conntrack_lock);
394 return ret; 394 return ret;
395} 395}
396EXPORT_SYMBOL_GPL(nf_ct_expect_related); 396EXPORT_SYMBOL_GPL(nf_ct_expect_related);
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 872c1aa3124c..02563050cc3a 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1415,7 +1415,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1415 nf_ct_refresh(ct, skb, info->timeout * HZ); 1415 nf_ct_refresh(ct, skb, info->timeout * HZ);
1416 1416
1417 /* Set expect timeout */ 1417 /* Set expect timeout */
1418 read_lock_bh(&nf_conntrack_lock); 1418 spin_lock_bh(&nf_conntrack_lock);
1419 exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, 1419 exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
1420 info->sig_port[!dir]); 1420 info->sig_port[!dir]);
1421 if (exp) { 1421 if (exp) {
@@ -1425,7 +1425,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1425 NF_CT_DUMP_TUPLE(&exp->tuple); 1425 NF_CT_DUMP_TUPLE(&exp->tuple);
1426 set_expect_timeout(exp, info->timeout); 1426 set_expect_timeout(exp, info->timeout);
1427 } 1427 }
1428 read_unlock_bh(&nf_conntrack_lock); 1428 spin_unlock_bh(&nf_conntrack_lock);
1429 } 1429 }
1430 1430
1431 return 0; 1431 return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 42f781fcba64..b1fd21cc1dbc 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -138,7 +138,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
138 */ 138 */
139 synchronize_rcu(); 139 synchronize_rcu();
140 140
141 write_lock_bh(&nf_conntrack_lock); 141 spin_lock_bh(&nf_conntrack_lock);
142 142
143 /* Get rid of expectations */ 143 /* Get rid of expectations */
144 for (i = 0; i < nf_ct_expect_hsize; i++) { 144 for (i = 0; i < nf_ct_expect_hsize; i++) {
@@ -160,7 +160,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
160 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode) 160 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode)
161 unhelp(h, me); 161 unhelp(h, me);
162 } 162 }
163 write_unlock_bh(&nf_conntrack_lock); 163 spin_unlock_bh(&nf_conntrack_lock);
164} 164}
165EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 165EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
166 166
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b701dcce0e69..b6a8c089a075 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1220,7 +1220,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1220 return err; 1220 return err;
1221 } 1221 }
1222 1222
1223 write_lock_bh(&nf_conntrack_lock); 1223 spin_lock_bh(&nf_conntrack_lock);
1224 if (cda[CTA_TUPLE_ORIG]) 1224 if (cda[CTA_TUPLE_ORIG])
1225 h = __nf_conntrack_find(&otuple, NULL); 1225 h = __nf_conntrack_find(&otuple, NULL);
1226 else if (cda[CTA_TUPLE_REPLY]) 1226 else if (cda[CTA_TUPLE_REPLY])
@@ -1248,7 +1248,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1248 atomic_inc(&master_ct->ct_general.use); 1248 atomic_inc(&master_ct->ct_general.use);
1249 } 1249 }
1250 1250
1251 write_unlock_bh(&nf_conntrack_lock); 1251 spin_unlock_bh(&nf_conntrack_lock);
1252 err = -ENOENT; 1252 err = -ENOENT;
1253 if (nlh->nlmsg_flags & NLM_F_CREATE) 1253 if (nlh->nlmsg_flags & NLM_F_CREATE)
1254 err = ctnetlink_create_conntrack(cda, 1254 err = ctnetlink_create_conntrack(cda,
@@ -1281,7 +1281,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1281 } 1281 }
1282 1282
1283out_unlock: 1283out_unlock:
1284 write_unlock_bh(&nf_conntrack_lock); 1284 spin_unlock_bh(&nf_conntrack_lock);
1285 return err; 1285 return err;
1286} 1286}
1287 1287
@@ -1614,10 +1614,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1614 struct nf_conn_help *m_help; 1614 struct nf_conn_help *m_help;
1615 1615
1616 /* delete all expectations for this helper */ 1616 /* delete all expectations for this helper */
1617 write_lock_bh(&nf_conntrack_lock); 1617 spin_lock_bh(&nf_conntrack_lock);
1618 h = __nf_conntrack_helper_find_byname(name); 1618 h = __nf_conntrack_helper_find_byname(name);
1619 if (!h) { 1619 if (!h) {
1620 write_unlock_bh(&nf_conntrack_lock); 1620 spin_unlock_bh(&nf_conntrack_lock);
1621 return -EINVAL; 1621 return -EINVAL;
1622 } 1622 }
1623 for (i = 0; i < nf_ct_expect_hsize; i++) { 1623 for (i = 0; i < nf_ct_expect_hsize; i++) {
@@ -1632,10 +1632,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1632 } 1632 }
1633 } 1633 }
1634 } 1634 }
1635 write_unlock_bh(&nf_conntrack_lock); 1635 spin_unlock_bh(&nf_conntrack_lock);
1636 } else { 1636 } else {
1637 /* This basically means we have to flush everything*/ 1637 /* This basically means we have to flush everything*/
1638 write_lock_bh(&nf_conntrack_lock); 1638 spin_lock_bh(&nf_conntrack_lock);
1639 for (i = 0; i < nf_ct_expect_hsize; i++) { 1639 for (i = 0; i < nf_ct_expect_hsize; i++) {
1640 hlist_for_each_entry_safe(exp, n, next, 1640 hlist_for_each_entry_safe(exp, n, next,
1641 &nf_ct_expect_hash[i], 1641 &nf_ct_expect_hash[i],
@@ -1646,7 +1646,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1646 } 1646 }
1647 } 1647 }
1648 } 1648 }
1649 write_unlock_bh(&nf_conntrack_lock); 1649 spin_unlock_bh(&nf_conntrack_lock);
1650 } 1650 }
1651 1651
1652 return 0; 1652 return 0;
@@ -1732,11 +1732,11 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1732 if (err < 0) 1732 if (err < 0)
1733 return err; 1733 return err;
1734 1734
1735 write_lock_bh(&nf_conntrack_lock); 1735 spin_lock_bh(&nf_conntrack_lock);
1736 exp = __nf_ct_expect_find(&tuple); 1736 exp = __nf_ct_expect_find(&tuple);
1737 1737
1738 if (!exp) { 1738 if (!exp) {
1739 write_unlock_bh(&nf_conntrack_lock); 1739 spin_unlock_bh(&nf_conntrack_lock);
1740 err = -ENOENT; 1740 err = -ENOENT;
1741 if (nlh->nlmsg_flags & NLM_F_CREATE) 1741 if (nlh->nlmsg_flags & NLM_F_CREATE)
1742 err = ctnetlink_create_expect(cda, u3); 1742 err = ctnetlink_create_expect(cda, u3);
@@ -1746,7 +1746,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1746 err = -EEXIST; 1746 err = -EEXIST;
1747 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) 1747 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
1748 err = ctnetlink_change_expect(exp, cda); 1748 err = ctnetlink_change_expect(exp, cda);
1749 write_unlock_bh(&nf_conntrack_lock); 1749 spin_unlock_bh(&nf_conntrack_lock);
1750 1750
1751 return err; 1751 return err;
1752} 1752}