Diffstat (limited to 'net/sctp/input.c')
-rw-r--r--  net/sctp/input.c | 65
1 file changed, 46 insertions(+), 19 deletions(-)

diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67add..ea2192444ce6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -53,6 +53,7 @@
 #include <linux/socket.h>
 #include <linux/ip.h>
 #include <linux/time.h> /* For struct timeval */
+#include <linux/slab.h>
 #include <net/ip.h>
 #include <net/icmp.h>
 #include <net/snmp.h>
@@ -75,7 +76,7 @@ static struct sctp_association *__sctp_lookup_association(
                                         const union sctp_addr *peer,
                                         struct sctp_transport **pt);
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
@@ -265,8 +266,13 @@ int sctp_rcv(struct sk_buff *skb)
         }
 
         if (sock_owned_by_user(sk)) {
+                if (sctp_add_backlog(sk, skb)) {
+                        sctp_bh_unlock_sock(sk);
+                        sctp_chunk_free(chunk);
+                        skb = NULL; /* sctp_chunk_free already freed the skb */
+                        goto discard_release;
+                }
                 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-                sctp_add_backlog(sk, skb);
         } else {
                 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
                 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +342,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                 sctp_bh_lock_sock(sk);
 
                 if (sock_owned_by_user(sk)) {
-                        sk_add_backlog(sk, skb);
-                        backloged = 1;
+                        if (sk_add_backlog(sk, skb))
+                                sctp_chunk_free(chunk);
+                        else
+                                backloged = 1;
                 } else
                         sctp_inq_push(inqueue, chunk);
 
@@ -362,22 +370,27 @@ done:
         return 0;
 }
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
         struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
         struct sctp_ep_common *rcvr = chunk->rcvr;
+        int ret;
 
-        /* Hold the assoc/ep while hanging on the backlog queue.
-         * This way, we know structures we need will not disappear from us
-         */
-        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-                sctp_association_hold(sctp_assoc(rcvr));
-        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-                sctp_endpoint_hold(sctp_ep(rcvr));
-        else
-                BUG();
+        ret = sk_add_backlog(sk, skb);
+        if (!ret) {
+                /* Hold the assoc/ep while hanging on the backlog queue.
+                 * This way, we know structures we need will not disappear
+                 * from us
+                 */
+                if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+                        sctp_association_hold(sctp_assoc(rcvr));
+                else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+                        sctp_endpoint_hold(sctp_ep(rcvr));
+                else
+                        BUG();
+        }
+        return ret;
 
-        sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -427,11 +440,25 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
 {
         SCTP_DEBUG_PRINTK("%s\n", __func__);
 
-        sctp_do_sm(SCTP_EVENT_T_OTHER,
-                   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
-                   asoc->state, asoc->ep, asoc, t,
-                   GFP_ATOMIC);
+        if (sock_owned_by_user(sk)) {
+                if (timer_pending(&t->proto_unreach_timer))
+                        return;
+                else {
+                        if (!mod_timer(&t->proto_unreach_timer,
+                                       jiffies + (HZ/20)))
+                                sctp_association_hold(asoc);
+                }
+
+        } else {
+                if (timer_pending(&t->proto_unreach_timer) &&
+                    del_timer(&t->proto_unreach_timer))
+                        sctp_association_put(asoc);
 
+                sctp_do_sm(SCTP_EVENT_T_OTHER,
+                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
+                           asoc->state, asoc->ep, asoc, t,
+                           GFP_ATOMIC);
+        }
 }
 
 /* Common lookup code for icmp/icmpv6 error handler. */
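
The core of the backlog change above is that sctp_add_backlog() now propagates the return value of sk_add_backlog(), takes the assoc/ep reference only when the packet was actually queued, and lets callers drop the chunk when the backlog is full. The following is a minimal userspace sketch of that pattern, assuming hypothetical stand-ins (struct pkt, struct endpoint, add_backlog(), endpoint_hold()) rather than the kernel types:

/* Illustrative sketch only -- not kernel code. The types and helpers here
 * are hypothetical stand-ins for sk_buff, sctp_ep_common, sk_add_backlog()
 * and sctp_association_hold()/sctp_endpoint_hold().
 */
#include <stddef.h>

struct pkt {
        struct pkt *next;
};

struct endpoint {
        int refcnt;                 /* stands in for the assoc/ep refcount */
        struct pkt *backlog_head;   /* bounded backlog queue */
        size_t backlog_len;
        size_t backlog_limit;
};

static void endpoint_hold(struct endpoint *ep)
{
        ep->refcnt++;
}

/* Mirrors the new sctp_add_backlog() contract: return 0 and take a
 * reference when the packet was queued, or non-zero when the backlog is
 * full so the caller can free the packet instead of leaking it.
 */
static int add_backlog(struct endpoint *ep, struct pkt *p)
{
        if (ep->backlog_len >= ep->backlog_limit)
                return -1;              /* queue full: caller drops the packet */

        p->next = ep->backlog_head;     /* queue the packet ... */
        ep->backlog_head = p;
        ep->backlog_len++;
        endpoint_hold(ep);              /* ... and pin the endpoint while it waits */
        return 0;
}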
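
The sctp_icmp_proto_unreachable() hunk defers the state-machine work to t->proto_unreach_timer when the socket is owned by a user context, holding the association while work is pending and dropping that hold if a pending deferral is cancelled. The sketch below illustrates that defer-if-locked idea with hypothetical helpers (struct transport, assoc_hold(), run_proto_unreach()); it is not kernel API:

/* Illustrative sketch only: the deferral pattern used by the new
 * sctp_icmp_proto_unreachable(), modelled with hypothetical helpers.
 * A reference is taken when work is deferred so the association-like
 * object cannot vanish before the deferred handler runs.
 */
#include <stdbool.h>

struct assoc {
        int refcnt;
};

struct transport {
        struct assoc *asoc;
        bool owned_by_user;   /* stands in for sock_owned_by_user() */
        bool deferred;        /* stands in for timer_pending() on the timer */
};

static void assoc_hold(struct assoc *a) { a->refcnt++; }
static void assoc_put(struct assoc *a)  { a->refcnt--; }
static void run_proto_unreach(struct transport *t) { (void)t; /* ... */ }

static void proto_unreachable(struct transport *t)
{
        if (t->owned_by_user) {
                /* Lock is busy: defer the work, pin the association, and
                 * return. If work is already pending, do nothing.
                 */
                if (!t->deferred) {
                        t->deferred = true;
                        assoc_hold(t->asoc);
                }
        } else {
                /* Lock is free: cancel any pending deferral (dropping the
                 * reference it held) and handle the event immediately.
                 */
                if (t->deferred) {
                        t->deferred = false;
                        assoc_put(t->asoc);
                }
                run_proto_unreach(t);
        }
}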
