author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-03-15 10:27:06 -0400
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-03-15 10:27:06 -0400
commit    | 2d3b5fa3a39d16c880bda3cf2bd9dd6ed5a01f74 (patch)
tree      | e20283fe2ed46aa35c8ca5fc1724ba067cd2e2f8 /include/net/sock.h
parent    | 3f17522ce461a31e7ced6311b28fcf5b8a763316 (diff)
parent    | 7278a22143b003e9af7b9ca1b5f1c40ae4b55d98 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/genesis-2.6
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- | include/net/sock.h | 17
1 file changed, 15 insertions, 2 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 6cb1676e409a..092b0551e77f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -253,6 +253,8 @@ struct sock {
         struct {
                 struct sk_buff *head;
                 struct sk_buff *tail;
+                int len;
+                int limit;
         } sk_backlog;
         wait_queue_head_t *sk_sleep;
         struct dst_entry *sk_dst_cache;
@@ -589,8 +591,8 @@ static inline int sk_stream_memory_free(struct sock *sk)
         return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-/* The per-socket spinlock must be held here. */
-static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
         if (!sk->sk_backlog.tail) {
                 sk->sk_backlog.head = sk->sk_backlog.tail = skb;
@@ -601,6 +603,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
         skb->next = NULL;
 }
 
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+        if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+                return -ENOBUFS;
+
+        __sk_add_backlog(sk, skb);
+        sk->sk_backlog.len += skb->truesize;
+        return 0;
+}
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
         return sk->sk_backlog_rcv(sk, skb);
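The diff makes sk_add_backlog() fallible: once the accumulated truesize in sk_backlog.len reaches max(sk_backlog.limit, 2 * sk_rcvbuf), it returns -ENOBUFS instead of queueing, and the __must_check annotation forces callers to handle that. The following sketch is illustrative only and not part of this commit; the handler names example_proto_rcv() and the drop policy are assumptions, but it shows the caller pattern the bounded backlog expects: deliver directly while the socket is not owned by a process, otherwise queue to the backlog and free the skb if the limit is hit.

/* Illustrative sketch, not from this commit: a hypothetical protocol
 * receive path using the bounded sk_add_backlog().
 */
#include <net/sock.h>
#include <linux/skbuff.h>

static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
        int rc = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                /* Socket not owned by a user context: process immediately. */
                rc = sk_backlog_rcv(sk, skb);
        } else if (sk_add_backlog(sk, skb)) {
                /* Backlog full: sk_add_backlog() returned -ENOBUFS, drop the skb. */
                bh_unlock_sock(sk);
                kfree_skb(skb);
                return -ENOBUFS;
        }
        bh_unlock_sock(sk);

        return rc;
}

Because the limit check uses max(sk_backlog.limit, sk->sk_rcvbuf << 1), a protocol can raise the floor by setting sk_backlog.limit, but every socket gets at least twice its receive buffer as backlog headroom before packets are refused.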