author     wangweidong <wangweidong1@huawei.com>    2014-01-21 02:44:12 -0500
committer  David S. Miller <davem@davemloft.net>    2014-01-21 21:41:36 -0500
commit     5bc1d1b4a261a865cbde65b1561748df5b9c724b (patch)
tree       4920c464aba555430904d4b93936a72a391016c7 /net/sctp
parent     048ed4b6266144fdee55089c9eef55b0c1d42ba1 (diff)
sctp: remove macros sctp_bh_[un]lock_sock
bh_[un]lock_sock was redefined as sctp_bh_[un]lock_sock to keep the code
user-space friendly, something we haven't made use of in years, so remove
the macros.
Signed-off-by: Wang Weidong <wangweidong1@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
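
For context, the macros being removed were one-to-one wrappers around the
generic socket BH lock helpers. A reconstruction of what the definitions in
include/net/sctp/sctp.h looked like (a sketch, not quoted verbatim from the
header):

	#define sctp_bh_lock_sock(sk)      bh_lock_sock(sk)
	#define sctp_bh_unlock_sock(sk)    bh_unlock_sock(sk)

Because each wrapper expands to exactly its replacement, every hunk below is
a mechanical rename with no behavioral change.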
Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/input.c          | 18
-rw-r--r--  net/sctp/protocol.c       |  4
-rw-r--r--  net/sctp/sm_sideeffect.c  | 16
-rw-r--r--  net/sctp/socket.c         |  4
4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 94f7f44049a6..f2e2cbd2d750 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * bottom halves on this lock, but a user may be in the lock too,
 	 * so check if it is busy.
 	 */
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sk != rcvr->sk) {
 		/* Our cached sk is different from the rcvr->sk.  This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
 		 * be doing something with the new socket.  Switch our veiw
 		 * of the current sk.
 		 */
-		sctp_bh_unlock_sock(sk);
+		bh_unlock_sock(sk);
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 	}
 
 	if (sock_owned_by_user(sk)) {
 		if (sctp_add_backlog(sk, skb)) {
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 			sctp_chunk_free(chunk);
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	 */
 
 	sk = rcvr->sk;
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
 		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	} else
 		sctp_inq_push(inqueue, chunk);
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* If the chunk was backloged again, don't drop refs */
 	if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 		goto out;
 	}
 
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
@@ -542,7 +542,7 @@ out:
 /* Common cleanup code for icmp/icmpv6 error handler. */
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
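Every hunk in input.c sits inside the same bottom-half locking idiom: take
the BH spinlock, and if a process-context user currently owns the socket,
queue the skb on the backlog to be replayed at release_sock() time instead
of processing it now. A minimal sketch of that idiom (my_process_skb() is an
illustrative stand-in, not a function from this patch):

	bh_lock_sock(sk);		/* spin lock, softirq context */
	if (sock_owned_by_user(sk)) {
		/* A process-context caller holds the socket; queue the
		 * skb so it is replayed via sk_backlog_rcv() when
		 * release_sock() runs.
		 */
		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
			kfree_skb(skb);	/* backlog full: drop */
	} else {
		my_process_skb(sk, skb);	/* illustrative handler */
	}
	bh_unlock_sock(sk);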
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d6934dc8dcb6..4e1d0fcb028e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -634,10 +634,10 @@ static void sctp_addr_wq_timeout_handler(unsigned long arg)
 			/* ignore bound-specific endpoints */
 			if (!sctp_is_ep_boundall(sk))
 				continue;
-			sctp_bh_lock_sock(sk);
+			bh_lock_sock(sk);
 			if (sctp_asconf_mgmt(sp, addrw) < 0)
 				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 free_next:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index ded6db66fb24..bd859154000e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -248,7 +248,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 
 	/* Check whether a task is in the sock. */
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -275,7 +275,7 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
 
@@ -288,7 +288,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 	struct net *net = sock_net(asoc->base.sk);
 	int error = 0;
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
@@ -315,7 +315,7 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
 
@@ -367,7 +367,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -392,7 +392,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
 		asoc->base.sk->sk_err = -error;
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_transport_put(transport);
 }
 
@@ -405,7 +405,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 	struct sctp_association *asoc = transport->asoc;
 	struct net *net = sock_net(asoc->base.sk);
 
-	sctp_bh_lock_sock(asoc->base.sk);
+	bh_lock_sock(asoc->base.sk);
 	if (sock_owned_by_user(asoc->base.sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
@@ -427,7 +427,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 			   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	sctp_bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(asoc->base.sk);
 	sctp_association_put(asoc);
 }
 
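The timer handlers touched in sm_sideeffect.c all share one shape: a timer
fires in softirq context, the handler takes the BH lock, and if the socket is
busy in process context it re-arms the timer for a short retry rather than
running the state machine under contention. Sketched from the body of
sctp_generate_t3_rtx_event(), with the retry interval to be treated as
illustrative rather than authoritative:

	bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		/* Socket is busy in process context: try again shortly
		 * instead of running the state machine now.
		 */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);	/* ref for the re-armed timer */
		goto out_unlock;
	}
	/* ... run sctp_do_sm() for the timeout event ... */
out_unlock:
	bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);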
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 893aa56c91cc..9e91d6e5df63 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1511,7 +1511,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	 * the net layers still may.
 	 */
 	local_bh_disable();
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* Hold the sock, since sk_common_release() will put sock_put()
 	 * and we have just a little more cleanup.
@@ -1519,7 +1519,7 @@ static void sctp_close(struct sock *sk, long timeout)
 	sock_hold(sk);
 	sk_common_release(sk);
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	local_bh_enable();
 
 	sock_put(sk);
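
One wrinkle worth noting in sctp_close(): bh_lock_sock() is just a spin lock
on sk->sk_lock.slock and does not itself disable bottom halves, so a
process-context caller must bracket it with local_bh_disable() and
local_bh_enable(), as the context lines above show; otherwise a softirq
arriving on the same CPU could spin forever on the lock held by the task it
interrupted. The required pairing, as a sketch:

	local_bh_disable();	/* keep softirqs off this CPU ...       */
	bh_lock_sock(sk);	/* ... before taking the owner spinlock */
	/* teardown that must not race with the receive path */
	bh_unlock_sock(sk);
	local_bh_enable();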