Diffstat (limited to 'net/core/sock.c')
 net/core/sock.c | 58 +++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 17 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 7626b6aacd68..76ff58d43e26 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err = 0;
+	int err;
 	int skb_len;
+	unsigned long flags;
+	struct sk_buff_head *list = &sk->sk_receive_queue;
 
 	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOMEM;
 	}
 
 	err = sk_filter(sk, skb);
 	if (err)
-		goto out;
+		return err;
 
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
-		err = -ENOBUFS;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOBUFS;
 	}
 
 	skb->dev = NULL;
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	skb_len = skb->len;
 
-	skb_queue_tail(&sk->sk_receive_queue, skb);
+	spin_lock_irqsave(&list->lock, flags);
+	skb->dropcount = atomic_read(&sk->sk_drops);
+	__skb_queue_tail(list, skb);
+	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
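Note: the interesting change above is that sock_queue_rcv_skb() now open-codes skb_queue_tail(). Taking list->lock by hand lets skb->dropcount be snapshotted from sk->sk_drops under the same lock that orders the queue insert, so each packet carries a drop count consistent with its position in the receive queue; the early returns also replace the old out: label, and every drop path bumps sk->sk_drops before bailing out. For reference, skb_queue_tail() itself is roughly the following (net/core/skbuff.c), which the new code inlines around the dropcount assignment:

/* Roughly what skb_queue_tail() does; the patch inlines this pattern so
 * the dropcount snapshot and the insert happen under one lock hold.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}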
@@ -348,11 +352,18 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
+void sk_reset_txq(struct sock *sk)
+{
+	sk_tx_queue_clear(sk);
+}
+EXPORT_SYMBOL(sk_reset_txq);
+
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
 	struct dst_entry *dst = sk->sk_dst_cache;
 
 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		sk_tx_queue_clear(sk);
 		sk->sk_dst_cache = NULL;
 		dst_release(dst);
 		return NULL;
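Note: sk_reset_txq() and the extra sk_tx_queue_clear() in __sk_dst_check() invalidate the socket's cached transmit queue whenever the cached route goes away, since a stale mapping could keep traffic pinned to the wrong queue. The accessors themselves are not in this file; a hedged sketch of what this hunk assumes they look like (the real definitions belong in include/net/sock.h, next to the new sk_tx_queue_mapping field):

/* Sketch only: assumed accessors for the new sk_tx_queue_mapping
 * field; -1 is taken to mean "no queue recorded".
 */
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk->sk_tx_queue_mapping;
}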
@@ -406,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
 	if (copy_from_user(devname, optval, optlen))
 		goto out;
 
-	if (devname[0] == '\0') {
-		index = 0;
-	} else {
-		struct net_device *dev = dev_get_by_name(net, devname);
+	index = 0;
+	if (devname[0] != '\0') {
+		struct net_device *dev;
 
+		rcu_read_lock();
+		dev = dev_get_by_name_rcu(net, devname);
+		if (dev)
+			index = dev->ifindex;
+		rcu_read_unlock();
 		ret = -ENODEV;
 		if (!dev)
 			goto out;
-
-		index = dev->ifindex;
-		dev_put(dev);
 	}
 
 	lock_sock(sk);
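Note: the sock_bindtodevice() rewrite swaps dev_get_by_name(), which takes and drops a device refcount, for dev_get_by_name_rcu(): the device is only touched inside the rcu_read_lock() section, and only the plain integer ifindex escapes it, so the dev_put() disappears. From userspace nothing changes; a minimal caller looks like this (the interface name is a placeholder, and the option still requires CAP_NET_RAW):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	const char ifname[] = "eth0";	/* placeholder device name */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, sizeof(ifname)) < 0)
		perror("SO_BINDTODEVICE");	/* EPERM without CAP_NET_RAW */
	close(fd);
	return 0;
}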
@@ -702,6 +714,12 @@ set_rcvbuf:
 
 	/* We implement the SO_SNDLOWAT etc to
 	   not be settable (1003.1g 5.3) */
+	case SO_RXQ_OVFL:
+		if (valbool)
+			sock_set_flag(sk, SOCK_RXQ_OVFL);
+		else
+			sock_reset_flag(sk, SOCK_RXQ_OVFL);
+		break;
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -901,6 +919,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = sk->sk_mark;
 		break;
 
+	case SO_RXQ_OVFL:
+		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+		break;
+
 	default:
 		return -ENOPROTOOPT;
 	}
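Note: these two cases make the drop accounting visible to applications. SO_RXQ_OVFL is a plain boolean option, and with it enabled the receive path (in a companion change, not shown in this file) attaches the skb->dropcount recorded above as SOL_SOCKET/SO_RXQ_OVFL ancillary data on recvmsg(). A hedged userspace sketch, assuming the cmsg payload is a 32-bit counter and that libc may not yet define SO_RXQ_OVFL:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40	/* value from asm-generic/socket.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0), on = 1;
	char data[2048];
	union {	/* keep the control buffer aligned for cmsghdr */
		char buf[CMSG_SPACE(sizeof(uint32_t))];
		struct cmsghdr align;
	} u;
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_port = htons(12345),	/* placeholder port */
	};
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cm;

	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SO_RXQ_OVFL) {
			uint32_t drops;

			memcpy(&drops, CMSG_DATA(cm), sizeof(drops));
			printf("dropped so far: %u\n", drops);
		}
	}
	return 0;
}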
@@ -939,7 +961,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
 	void *sptr = nsk->sk_security;
 #endif
 	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+		     sizeof(osk->sk_tx_queue_mapping));
 	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
 	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
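Note: the BUILD_BUG_ON is widened because the new sk_tx_queue_mapping field sits between sk_refcnt and sk_copy_start, i.e. in the region sock_copy() deliberately does not memcpy(), so a cloned socket starts with no inherited queue mapping; the assertion forces anyone adding a field there to account for it. A standalone toy (hypothetical field names, not the real struct sock) showing how the offsetof() guard trips at compile time:

#include <stddef.h>

#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct sock_like {
	long node;		/* stands in for sk_node */
	int refcnt;		/* stands in for sk_refcnt */
	int tx_queue_map;	/* stands in for sk_tx_queue_mapping */
	int copy_start;		/* first field that clones copy */
};

int main(void)
{
	/* Insert a field above copy_start without updating this sum
	 * and the array size below goes negative: compile error. */
	BUILD_BUG_ON(offsetof(struct sock_like, copy_start) !=
		     sizeof(long) + sizeof(int) + sizeof(int));
	return 0;
}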
@@ -983,6 +1006,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
+		sk_tx_queue_clear(sk);
 	}
 
 	return sk;
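Note: clearing the mapping in sk_prot_alloc() guarantees every freshly allocated socket starts with no recorded queue. The consumer side is not in this file; conceptually (a hedged sketch with assumed names, not the literal net/core/dev.c change from this series) the transmit path uses the cache like this:

/* Sketch only: how a tx-queue chooser could use the cached mapping.
 * pick_tx_queue() is an assumed name, not a kernel function.
 */
static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int cached = sk ? sk_tx_queue_get(sk) : -1;

	if (cached >= 0)
		return cached;			/* reuse earlier choice */

	cached = skb_tx_hash(dev, skb);		/* recompute by hash */
	if (sk)
		sk_tx_queue_set(sk, cached);	/* remember for next time */
	return cached;
}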