Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c        2
-rw-r--r--  net/core/dev.c            23
-rw-r--r--  net/core/net_namespace.c   2
-rw-r--r--  net/core/netpoll.c         2
-rw-r--r--  net/core/sock.c           49
5 files changed, 57 insertions, 21 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 58abee1f1df1..b0fe69211eef 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
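
For reference, the sock_poll_wait() helper used above is added to
include/net/sock.h by the same series. It wraps poll_wait() and issues a
memory barrier paired with the one in sk_has_sleeper() (see the
net/core/sock.c hunks below). Roughly, from the 2.6.31-era header (shown
for context, not part of this diff):

static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		/*
		 * We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in sk_has_sleeper.
		 */
		smp_mb();
	}
}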
diff --git a/net/core/dev.c b/net/core/dev.c
index 70c27e0c7c32..6a94475aee85 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  * @from: source device
  *
  * Add newly added addresses to the destination device and release
- * addresses that have no users left.
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
  *
  * This function is intended to be called from the dev->set_rx_mode
  * function of layered software devices.
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
+	netif_addr_lock_bh(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
+	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3981,27 +3986,27 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return;
 
+	netif_addr_lock_bh(from);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
+	netif_addr_unlock(to);
+	netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
 static void dev_unicast_flush(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
+	netif_addr_lock_bh(dev);
 	__hw_addr_flush(&dev->uc);
+	netif_addr_unlock_bh(dev);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
 	__hw_addr_init(&dev->uc);
 }
 
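
For reference, the netif_addr_lock_bh()/netif_addr_unlock_bh() pairs added
above serialize access to the unicast list through dev->addr_list_lock
instead of relying on the RTNL. The helpers, from include/linux/netdevice.h
of this era (shown for context, not part of this diff):

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}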
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7292a2719dc..197283072cc8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data)
 	 */
 
 	ng->len = id;
-	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
+	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
 	rcu_assign_pointer(net->gen, ng);
 	call_rcu(&old_ng->rcu, net_generic_release);
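
The fix above matters because net_generic keeps an array of void * slots:
the old code passed old_ng->len (a slot count) to memcpy(), which expects a
byte count, so only len bytes of the pointer array were copied. For context,
the structure's (simplified) layout from include/net/netns/generic.h:

struct net_generic {
	unsigned int len;	/* number of slots in ptr[] */
	struct rcu_head rcu;
	void *ptr[0];		/* per-net pointers, indexed by id */
};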
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9675f312830d..df30feb2fc72 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -740,7 +740,7 @@ int netpoll_setup(struct netpoll *np)
 				       np->name);
 			break;
 		}
-		cond_resched();
+		msleep(1);
 	}
 
 	/* If carrier appears to come up instantly, we don't
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ba569bc973..bbb25be7ddfe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -631,7 +631,7 @@ set_rcvbuf:
 
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 			af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarily, because of RCU lookups. sk_node should also be left
+ * as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -939,8 +945,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	struct kmem_cache *slab;
 
 	slab = prot->slab;
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
+	if (slab != NULL) {
+		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+		if (!sk)
+			return sk;
+		if (priority & __GFP_ZERO) {
+			/*
+			 * Caches using SLAB_DESTROY_BY_RCU should leave
+			 * sk_node.next unmodified. Special care is taken
+			 * when initializing the object to zero.
+			 */
+			if (offsetof(struct sock, sk_node.next) != 0)
+				memset(sk, 0, offsetof(struct sock, sk_node.next));
+			memset(&sk->sk_node.pprev, 0,
+			       prot->obj_size - offsetof(struct sock,
+							 sk_node.pprev));
+		}
+	}
 	else
 		sk = kmalloc(prot->obj_size, priority);
 
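
To illustrate the two-memset scheme above (hypothetical offsets, assuming
sk_node is the first member of struct sock and pointers are 8 bytes): the
first memset is skipped because offsetof(struct sock, sk_node.next) == 0,
and the second clears everything from sk_node.pprev onward, so a concurrent
RCU reader following sk_node.next never sees it zeroed:

	0		8				obj_size
	+---------------+-------------------------------+
	| sk_node.next	| cleared by the second memset	|
	| (preserved)	| (sk_node.pprev ... end)	|
	+---------------+-------------------------------+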
@@ -1125,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 	newsk->sk_err = 0;
 	newsk->sk_priority = 0;
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&newsk->sk_refcnt, 2);
 
 	/*
@@ -1715,7 +1741,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
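
sk_has_sleeper() folds the old "sk->sk_sleep && waitqueue_active()" test
behind a memory barrier paired with the one in sock_poll_wait() (see the
datagram.c hunk above). Roughly, as this series adds it to
include/net/sock.h (shown for context, not part of this diff):

static inline int sk_has_sleeper(struct sock *sk)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier is paired in sock_poll_wait.
	 */
	smp_mb();
	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}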
@@ -1723,7 +1749,7 @@ static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1758,7 @@ static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1773,7 @@ static void sock_def_write_space(struct sock *sk)
 	 * progress. --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 							POLLWRNORM | POLLWRBAND);
 
@@ -1840,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);
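
The smp_wmb() additions in sk_clone() and sock_init_data() make sure a
socket's initialization is visible before its refcount is. The read side
this pairs with is the lockless lookup pattern described in
Documentation/RCU/rculist_nulls.txt, roughly as follows (a hypothetical
sketch, not code from this patch; __lookup() and the key re-check stand in
for a protocol's real hash lookup):

	rcu_read_lock();
	sk = __lookup(hashtable, key);	/* hypothetical hash walk */
	if (sk) {
		if (!atomic_inc_not_zero(&sk->sk_refcnt)) {
			/* refcount was zero: object is being freed */
			sk = NULL;
		} else if (sk->sk_hash != key) {
			/* SLAB_DESTROY_BY_RCU recycled the slot; retry */
			sock_put(sk);
			sk = NULL;
		}
	}
	rcu_read_unlock();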