Diffstat (limited to 'net/core/sock.c')
 net/core/sock.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 7626b6aacd68..5a51512f638a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err = 0;
+	int err;
 	int skb_len;
+	unsigned long flags;
+	struct sk_buff_head *list = &sk->sk_receive_queue;
 
 	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOMEM;
 	}
 
 	err = sk_filter(sk, skb);
 	if (err)
-		goto out;
+		return err;
 
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
-		err = -ENOBUFS;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOBUFS;
 	}
 
 	skb->dev = NULL;
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	skb_len = skb->len;
 
-	skb_queue_tail(&sk->sk_receive_queue, skb);
+	spin_lock_irqsave(&list->lock, flags);
+	skb->dropcount = atomic_read(&sk->sk_drops);
+	__skb_queue_tail(list, skb);
+	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
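Note: skb_queue_tail() is itself just spin_lock_irqsave() + __skb_queue_tail() + spin_unlock_irqrestore() on the queue lock, so open-coding it here costs nothing; the point is that skb->dropcount can be stamped inside the same critical section, keeping the recorded drop count consistent with the skb's position in the receive queue. The stamp is surfaced to userspace by a companion include/net/sock.h hunk that is not part of this file's diff; a sketch of that helper, assuming the header side of the series matches:

/*
 * Sketch of the companion helper from include/net/sock.h (not in this
 * file's diff): if the socket opted in via SO_RXQ_OVFL and a non-zero
 * drop count was stamped at enqueue time, attach it to the message as
 * a SOL_SOCKET/SO_RXQ_OVFL control message.
 */
static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb)
{
	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
		put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
			 sizeof(__u32), &skb->dropcount);
}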
@@ -348,11 +352,18 @@ discard_and_relse:
348} 352}
349EXPORT_SYMBOL(sk_receive_skb); 353EXPORT_SYMBOL(sk_receive_skb);
350 354
355void sk_reset_txq(struct sock *sk)
356{
357 sk_tx_queue_clear(sk);
358}
359EXPORT_SYMBOL(sk_reset_txq);
360
351struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 361struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
352{ 362{
353 struct dst_entry *dst = sk->sk_dst_cache; 363 struct dst_entry *dst = sk->sk_dst_cache;
354 364
355 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 365 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
366 sk_tx_queue_clear(sk);
356 sk->sk_dst_cache = NULL; 367 sk->sk_dst_cache = NULL;
357 dst_release(dst); 368 dst_release(dst);
358 return NULL; 369 return NULL;
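Note: sk_tx_queue_clear() comes from the companion include/net/sock.h hunk. Clearing the cached transmit queue here matters because the mapping is only valid for the dst it was chosen against; once the cached route is found obsolete, a stale queue index could steer packets at a device or queue that no longer carries the flow. sk_reset_txq() is just an exported, out-of-line entry point to the same operation. A minimal sketch of the header-side helpers, assuming sk_tx_queue_mapping is a plain int with -1 as the "no cached queue" sentinel:

/* Sketch of the companion include/net/sock.h helpers (not shown in this
 * diff); the -1 "unset" sentinel is an assumption of this sketch.
 */
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;	/* cache the chosen tx queue */
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;		/* invalidate the cache */
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}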
@@ -702,6 +713,12 @@ set_rcvbuf:
 
 	/* We implement the SO_SNDLOWAT etc to
 	   not be settable (1003.1g 5.3) */
+	case SO_RXQ_OVFL:
+		if (valbool)
+			sock_set_flag(sk, SOCK_RXQ_OVFL);
+		else
+			sock_reset_flag(sk, SOCK_RXQ_OVFL);
+		break;
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -901,6 +918,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = sk->sk_mark;
 		break;
 
+	case SO_RXQ_OVFL:
+		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	}
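Note: with the setsockopt and getsockopt halves above in place, userspace enables the feature and then reads the cumulative drop count from the control messages recvmsg() returns. A hedged userspace sketch; SO_RXQ_OVFL may need to be defined by hand if the libc headers predate this patch, and the cmsg is only attached by protocols converted to the new sock_recv_drops() path (UDP in the rest of the series):

/* Userspace sketch: enable SO_RXQ_OVFL on a datagram socket and read the
 * cumulative drop count attached to each message as a SOL_SOCKET cmsg.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40	/* value from asm-generic/socket.h in this series */
#endif

static void read_with_dropcount(int fd)
{
	char data[2048], cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_RXQ_OVFL) {
			uint32_t drops;

			memcpy(&drops, CMSG_DATA(cmsg), sizeof(drops));
			printf("drops so far: %u\n", drops);
		}
	}
}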
@@ -939,7 +960,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 	void *sptr = nsk->sk_security;
 #endif
 	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+		     sizeof(osk->sk_tx_queue_mapping));
 	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
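Note: sock_copy() only memcpy()s the region from sk_copy_start onward, and the BUILD_BUG_ON pins sk_copy_start directly behind sk_node, sk_refcnt and, now, sk_tx_queue_mapping. That keeps the new field in the not-copied prefix, so a cloned socket keeps the cleared mapping it got at allocation instead of inheriting its parent's. An illustrative sketch of the ordering this enforces, assuming no padding between the fields (the real definition lives in include/net/sock.h):

/* Illustrative sketch, not the real struct sock: the BUILD_BUG_ON above
 * checks exactly this ordering of the not-copied prefix at compile time.
 */
struct sock_prefix_sketch {
	struct hlist_node sk_node;		/* skipped by sock_copy() */
	atomic_t	  sk_refcnt;		/* skipped by sock_copy() */
	int		  sk_tx_queue_mapping;	/* new: must stay in the skipped prefix */
	char		  sk_copy_start[0];	/* sock_copy()'s memcpy() starts here */
};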
@@ -983,6 +1005,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
+		sk_tx_queue_clear(sk);
 	}
 
 	return sk;