-rw-r--r--	include/net/sock.h	15
-rw-r--r--	net/core/sock.c	16
2 files changed, 28 insertions, 3 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 6cb1676e409a..2516d76f043c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -253,6 +253,8 @@ struct sock {
 	struct {
 		struct sk_buff *head;
 		struct sk_buff *tail;
+		int len;
+		int limit;
 	} sk_backlog;
 	wait_queue_head_t	*sk_sleep;
 	struct dst_entry	*sk_dst_cache;
@@ -589,7 +591,7 @@ static inline int sk_stream_memory_free(struct sock *sk)
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
 
-/* The per-socket spinlock must be held here. */
+/* OOB backlog add */
 static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	if (!sk->sk_backlog.tail) {
@@ -601,6 +603,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+/* The per-socket spinlock must be held here. */
+static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+		return -ENOBUFS;
+
+	sk_add_backlog(sk, skb);
+	sk->sk_backlog.len += skb->truesize;
+	return 0;
+}
+
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	return sk->sk_backlog_rcv(sk, skb);
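The sketch below is commentary, not part of the patch: a minimal illustration of how a protocol receive handler is expected to call the new sk_add_backlog_limited() helper while the socket is owned by user context. The function name example_do_rcv and the exact drop handling are assumptions for illustration only.

/* Illustrative sketch only: a backlog-queueing site using the new
 * helper.  example_do_rcv() is a hypothetical name, not from this patch.
 */
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Socket not owned by a process: process the packet now. */
		rc = sk_backlog_rcv(sk, skb);
	} else if (sk_add_backlog_limited(sk, skb)) {
		/* Backlog already holds at least
		 * max(sk_backlog.limit, 2 * sk_rcvbuf) bytes of truesize:
		 * drop instead of queueing without bound.
		 */
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}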
diff --git a/net/core/sock.c b/net/core/sock.c
index fcd397a762ff..6e22dc973d23 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else
-		sk_add_backlog(sk, skb);
+	} else if (sk_add_backlog_limited(sk, skb)) {
+		bh_unlock_sock(sk);
+		atomic_inc(&sk->sk_drops);
+		goto discard_and_relse;
+	}
+
 	bh_unlock_sock(sk);
 out:
 	sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
+		newsk->sk_backlog.len = 0;
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		/*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)
 
 		bh_lock_sock(sk);
 	} while ((skb = sk->sk_backlog.head) != NULL);
+
+	/*
+	 * Doing the zeroing here guarantee we can not loop forever
+	 * while a wild producer attempts to flood us.
+	 */
+	sk->sk_backlog.len = 0;
 }
 
 /**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 	sk->sk_allocation	=	GFP_KERNEL;
 	sk->sk_rcvbuf		=	sysctl_rmem_default;
 	sk->sk_sndbuf		=	sysctl_wmem_default;
+	sk->sk_backlog.limit	=	sk->sk_rcvbuf << 1;
 	sk->sk_state		=	TCP_CLOSE;
 	sk_set_socket(sk, sock);
 
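A note on the net effect, again as commentary rather than part of the patch: sock_init_data() seeds sk_backlog.limit with twice the default receive buffer, sk_add_backlog_limited() charges each queued skb's truesize against sk_backlog.len, and __release_sock() resets that counter only after the backlog has been fully drained, so a sender that keeps the backlog full simply has its packets dropped rather than keeping the drain loop alive. A rough model of the check, where only the sk_backlog and sk_rcvbuf fields are real and the helper name is hypothetical:

/* Rough model of the accounting this patch introduces. */
static bool example_backlog_full(const struct sock *sk)
{
	/* truesize of everything queued since the last __release_sock() */
	int queued = sk->sk_backlog.len;
	/* effective cap: the larger of the per-socket limit and twice
	 * the current receive buffer size
	 */
	int cap = max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1);

	return queued >= cap;
}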