author	Zhu Yi <yi.zhu@intel.com>	2010-03-04 13:01:44 -0500
committer	David S. Miller <davem@davemloft.net>	2010-03-05 16:34:01 -0500
commit	50b1a782f845140f4138f14a1ce8a4a6dd0cc82f (patch)
tree	3cb1df5d01fb46ca4fef4146dc50eb455643e252 /net/sctp
parent	79545b681961d7001c1f4c3eb9ffb87bed4485db (diff)
sctp: use limited socket backlog
Make sctp adapt to the limited socket backlog change.

Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
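For background, this patch depends on the limited-backlog helper introduced earlier in the same series. The sketch below shows roughly what sk_add_backlog_limited() is assumed to do; the field names follow the struct sock of this era, but the exact accounting in the upstream helper may differ, so treat it as an illustration rather than the authoritative definition.

/* Rough sketch of the limited-backlog helper this patch relies on
 * (assumed behaviour, not the verbatim upstream code).
 * Caller holds the per-socket spinlock (sctp_bh_lock_sock).
 */
static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
{
	/* Refuse the skb once the bytes already queued on the backlog
	 * reach the configured limit; sctp_init_sock() below sets
	 * sk->sk_backlog.limit from sysctl_sctp_rmem[1].
	 */
	if (sk->sk_backlog.len >= sk->sk_backlog.limit)
		return -ENOBUFS;

	sk_add_backlog(sk, skb);		/* unlimited tail insert */
	sk->sk_backlog.len += skb->truesize;	/* account queued bytes */
	return 0;
}

When the helper returns non-zero, the packet is simply dropped, which is why the hunks below free the chunk and bail out instead of queueing unconditionally.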
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/input.c	42
-rw-r--r--	net/sctp/socket.c	3
2 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67ad..cbc063665e6 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet.  */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
 	}
 
 	if (sock_owned_by_user(sk)) {
+		if (sctp_add_backlog(sk, skb)) {
+			sctp_bh_unlock_sock(sk);
+			sctp_chunk_free(chunk);
+			skb = NULL; /* sctp_chunk_free already freed the skb */
+			goto discard_release;
+		}
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-		sctp_add_backlog(sk, skb);
 	} else {
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	sctp_bh_lock_sock(sk);
 
 	if (sock_owned_by_user(sk)) {
-		sk_add_backlog(sk, skb);
-		backloged = 1;
+		if (sk_add_backlog_limited(sk, skb))
+			sctp_chunk_free(chunk);
+		else
+			backloged = 1;
 	} else
 		sctp_inq_push(inqueue, chunk);
 
@@ -362,22 +369,27 @@ done:
 	return 0;
 }
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 	struct sctp_ep_common *rcvr = chunk->rcvr;
+	int ret;
 
-	/* Hold the assoc/ep while hanging on the backlog queue.
-	 * This way, we know structures we need will not disappear from us
-	 */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_hold(sctp_assoc(rcvr));
-	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-		sctp_endpoint_hold(sctp_ep(rcvr));
-	else
-		BUG();
+	ret = sk_add_backlog_limited(sk, skb);
+	if (!ret) {
+		/* Hold the assoc/ep while hanging on the backlog queue.
+		 * This way, we know structures we need will not disappear
+		 * from us
+		 */
+		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+			sctp_association_hold(sctp_assoc(rcvr));
+		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+			sctp_endpoint_hold(sctp_ep(rcvr));
+		else
+			BUG();
+	}
+	return ret;
 
-	sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error.  */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f6d1e59c415..dfc5c127efd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	SCTP_DBG_OBJCNT_INC(sock);
 	percpu_counter_inc(&sctp_sockets_allocated);
 
+	/* Set socket backlog limit. */
+	sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+
 	local_bh_disable();
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	local_bh_enable();