Diffstat (limited to 'net/core')

 net/core/datagram.c      |  2 
 net/core/dev.c           | 23 
 net/core/gen_estimator.c | 12 
 net/core/gen_stats.c     | 11 
 net/core/net_namespace.c |  2 
 net/core/netpoll.c       |  2 
 net/core/sock.c          | 49 

 7 files changed, 71 insertions(+), 30 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 58abee1f1df1..b0fe69211eef 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk;
 	unsigned int mask;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	sock_poll_wait(file, sk->sk_sleep, wait);
 	mask = 0;
 
 	/* exceptional events? */
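
Note: sock_poll_wait() is the poll-side half of a barrier pairing; the
wakeup-side half is the sk_has_sleeper() conversion in net/core/sock.c
below. A sketch of the helper, assuming the include/net/sock.h
definition added by this series (not part of this diff):

	static inline void sock_poll_wait(struct file *filp,
			wait_queue_head_t *wait_address, poll_table *p)
	{
		if (p && wait_address) {
			poll_wait(filp, wait_address, p);
			/*
			 * We need to be sure we are in sync with
			 * socket flags modification; this barrier is
			 * paired with the one in sk_has_sleeper().
			 */
			smp_mb();
		}
	}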
diff --git a/net/core/dev.c b/net/core/dev.c
index 70c27e0c7c32..6a94475aee85 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  * @from: source device
  *
  * Add newly added addresses to the destination device and release
- * addresses that have no users left.
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
  *
  * This function is intended to be called from the dev->set_rx_mode
  * function of layered software devices.
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
+	netif_addr_lock_bh(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
+	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3981,27 +3986,27 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return;
 
+	netif_addr_lock_bh(from);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
+	netif_addr_unlock(to);
+	netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
 static void dev_unicast_flush(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
+	netif_addr_lock_bh(dev);
 	__hw_addr_flush(&dev->uc);
+	netif_addr_unlock_bh(dev);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
 	__hw_addr_init(&dev->uc);
 }
 
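Note: the netif_addr_*() helpers taken above are thin wrappers around
dev->addr_list_lock; a sketch, assuming the include/linux/netdevice.h
definitions:

	static inline void netif_addr_lock(struct net_device *dev)
	{
		spin_lock(&dev->addr_list_lock);
	}

	static inline void netif_addr_lock_bh(struct net_device *dev)
	{
		spin_lock_bh(&dev->addr_list_lock);
	}

	static inline void netif_addr_unlock(struct net_device *dev)
	{
		spin_unlock(&dev->addr_list_lock);
	}

	static inline void netif_addr_unlock_bh(struct net_device *dev)
	{
		spin_unlock_bh(&dev->addr_list_lock);
	}

In dev_unicast_unsync() the outer netif_addr_lock_bh(from) already
disables bottom halves, so the nested lock on 'to' can be the plain
variant; always taking 'from' before 'to' keeps the lock order stable.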
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 78e5bfc454ae..493775f4f2f1 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -81,7 +81,7 @@
 struct gen_estimator
 {
 	struct list_head	list;
-	struct gnet_stats_basic	*bstats;
+	struct gnet_stats_basic_packed	*bstats;
 	struct gnet_stats_rate_est	*rate_est;
 	spinlock_t		*stats_lock;
 	int			ewma_log;
@@ -165,7 +165,7 @@ static void gen_add_node(struct gen_estimator *est)
 }
 
 static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
+struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
 				    const struct gnet_stats_rate_est *rate_est)
 {
 	struct rb_node *p = est_root.rb_node;
@@ -202,7 +202,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
  *
  * NOTE: Called under rtnl_mutex
  */
-int gen_new_estimator(struct gnet_stats_basic *bstats,
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
 		      spinlock_t *stats_lock,
 		      struct nlattr *opt)
@@ -262,7 +262,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * NOTE: Called under rtnl_mutex
  */
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
  *
 * Returns 0 on success or a negative error code.
 */
-int gen_replace_estimator(struct gnet_stats_basic *bstats,
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 			  struct gnet_stats_rate_est *rate_est,
 			  spinlock_t *stats_lock, struct nlattr *opt)
 {
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(gen_replace_estimator);
 *
 * Returns true if estimator is active, and false if not.
 */
-bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
 	ASSERT_RTNL();
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index c3d0ffeac243..8569310268ab 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -106,16 +106,21 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
  * if the room in the socket buffer was not sufficient.
  */
 int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b)
+gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
 {
 	if (d->compat_tc_stats) {
 		d->tc_stats.bytes = b->bytes;
 		d->tc_stats.packets = b->packets;
 	}
 
-	if (d->tail)
-		return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b));
+	if (d->tail) {
+		struct gnet_stats_basic sb;
 
+		memset(&sb, 0, sizeof(sb));
+		sb.bytes = b->bytes;
+		sb.packets = b->packets;
+		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+	}
 	return 0;
 }
 
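Note: the _packed rename plus the temporary sb preserve the netlink ABI.
Roughly, assuming the include/linux/gen_stats.h definitions from this
series:

	struct gnet_stats_basic {
		__u64	bytes;
		__u32	packets;
	};

	struct gnet_stats_basic_packed {
		__u64	bytes;
		__u32	packets;
	} __attribute__ ((packed));

On 64-bit ABIs sizeof(struct gnet_stats_basic) is 16 (four bytes of tail
padding) while the packed variant is 12, so dumping the packed struct
directly would shrink the TCA_STATS_BASIC attribute seen by userspace;
hence the copy into sb.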
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b7292a2719dc..197283072cc8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data)
 	 */
 
 	ng->len = id;
-	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
+	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
 	rcu_assign_pointer(net->gen, ng);
 	call_rcu(&old_ng->rcu, net_generic_release);
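
Note: the fix is easier to see against the layout of struct net_generic
(roughly, per include/net/netns/generic.h):

	struct net_generic {
		unsigned int len;	/* number of entries in ptr[] */
		struct rcu_head rcu;
		void *ptr[0];
	};

ptr[] holds old_ng->len pointers, so the copy must move
old_ng->len * sizeof(void *) bytes; the old code moved only old_ng->len
bytes (e.g. 13 instead of 104 for len == 13 on a 64-bit kernel),
silently dropping most of the registered pointers.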
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9675f312830d..df30feb2fc72 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -740,7 +740,7 @@ int netpoll_setup(struct netpoll *np)
 				       np->name);
 				break;
 			}
-			cond_resched();
+			msleep(1);
 		}
 
 		/* If carrier appears to come up instantly, we don't
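
Note: cond_resched() only yields when another runnable task wants the
CPU, so this carrier-wait loop could spin a CPU flat out; msleep(1)
guarantees the task actually sleeps between checks. The surrounding
loop looks roughly like this (a sketch reconstructed from the context
lines; the 'ndev' and 'atmost' names are assumptions):

	while (!netif_carrier_ok(ndev)) {
		if (time_after(jiffies, atmost)) {
			printk(KERN_NOTICE
			       "%s: timeout waiting for carrier\n",
			       np->name);
			break;
		}
		msleep(1);
	}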
diff --git a/net/core/sock.c b/net/core/sock.c
index b0ba569bc973..bbb25be7ddfe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -631,7 +631,7 @@ set_rcvbuf:
 
 	case SO_TIMESTAMPING:
 		if (val & ~SOF_TIMESTAMPING_MASK) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			break;
 		}
 		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
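
Note: the missing minus sign matters because sock_setsockopt()'s return
value is handed straight back to the setsockopt() syscall, so a positive
EINVAL looks like a successful return of 22 rather than an error. A
hedged userspace illustration (SO_TIMESTAMPING defined defensively in
case the libc headers of the day lack it):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_TIMESTAMPING
	#define SO_TIMESTAMPING 37	/* value on most architectures */
	#endif

	static void check_so_timestamping(int fd)
	{
		int on = ~0;	/* sets bits outside SOF_TIMESTAMPING_MASK */
		int r = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				   &on, sizeof(on));

		/* before the fix: r == 22, errno untouched;
		 * after the fix:  r == -1 and errno == EINVAL */
		printf("r=%d errno=%d\n", r, errno);
	}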
@@ -919,13 +919,19 @@ static inline void sock_lock_init(struct sock *sk)
 			af_family_keys + sk->sk_family);
 }
 
+/*
+ * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
+ * even temporarly, because of RCU lookups. sk_node should also be left as is.
+ */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-
-	memcpy(nsk, osk, osk->sk_prot->obj_size);
+	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
+		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
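
Note: this relies on a specific layout at the top of struct sock:
sk_node first, sk_refcnt immediately after, then a sk_copy_start marker
from which bulk copying is safe. A sketch of the assumed layout (the
real fields live in struct sock_common and are reached through #defines
such as sk_node and sk_refcnt):

	struct sock_common {
		/* first fields are not copied by sock_copy() */
		union {
			struct hlist_node	skc_node;
			struct hlist_nulls_node	skc_nulls_node;
		};
		atomic_t		skc_refcnt;
		/* sk_copy_start points here: the rest is bulk-copied */
		unsigned int		skc_hash;
		/* ... */
	};

The BUILD_BUG_ON pins that assumption: inserting or reordering a field
ahead of sk_copy_start breaks the build instead of silently corrupting
concurrent RCU lookups.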
@@ -939,8 +945,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	struct kmem_cache *slab;
 
 	slab = prot->slab;
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
+	if (slab != NULL) {
+		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+		if (!sk)
+			return sk;
+		if (priority & __GFP_ZERO) {
+			/*
+			 * caches using SLAB_DESTROY_BY_RCU should let
+			 * sk_node.next un-modified. Special care is taken
+			 * when initializing object to zero.
+			 */
+			if (offsetof(struct sock, sk_node.next) != 0)
+				memset(sk, 0, offsetof(struct sock, sk_node.next));
+			memset(&sk->sk_node.pprev, 0,
+			       prot->obj_size - offsetof(struct sock,
+							 sk_node.pprev));
+		}
+	}
 	else
 		sk = kmalloc(prot->obj_size, priority);
 
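Note: the two-part memset exists because these slabs may be
SLAB_DESTROY_BY_RCU: an object can be freed and instantly reused while
an RCU reader is still walking the hash chain through sk_node.next, so
that one pointer must survive re-initialization. Since pprev directly
follows next inside struct hlist_node, restarting the memset at
sk_node.pprev skips exactly that pointer. The same logic as a
standalone sketch (a restatement, not code from the patch):

	static void sk_zero_keep_node_next(struct sock *sk, size_t obj_size)
	{
		/* zero any bytes before sk_node.next (none if sk_node
		 * is the first member) */
		if (offsetof(struct sock, sk_node.next) != 0)
			memset(sk, 0, offsetof(struct sock, sk_node.next));
		/* skip sk_node.next itself, zero everything after */
		memset(&sk->sk_node.pprev, 0,
		       obj_size - offsetof(struct sock, sk_node.pprev));
	}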
@@ -1125,6 +1146,11 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
 
 		newsk->sk_err	   = 0;
 		newsk->sk_priority = 0;
+		/*
+		 * Before updating sk_refcnt, we must commit prior changes to memory
+		 * (Documentation/RCU/rculist_nulls.txt for details)
+		 */
+		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
 		/*
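
Note: the smp_wmb() before setting sk_refcnt is the writer half of the
protocol in Documentation/RCU/rculist_nulls.txt: publish a fully
initialized object before its refcount becomes nonzero. The matching
reader takes a reference with atomic_inc_not_zero() and then
revalidates, roughly like this (__lookup(), key_matches(), and the
table/key types are hypothetical stand-ins):

	static struct sock *find_sock_rcu(struct htable *ht,
					  const struct key *key)
	{
		struct sock *sk;

		rcu_read_lock();
		sk = __lookup(ht, key);
		if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
			sk = NULL;	/* refcnt was 0: being freed */
		else if (sk && !key_matches(sk, key)) {
			sock_put(sk);	/* freed and reused under us */
			sk = NULL;
		}
		rcu_read_unlock();
		return sk;
	}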
@@ -1715,7 +1741,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
 static void sock_def_wakeup(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_all(sk->sk_sleep);
 	read_unlock(&sk->sk_callback_lock);
 }
@@ -1723,7 +1749,7 @@ static void sock_def_wakeup(struct sock *sk)
 static void sock_def_error_report(struct sock *sk)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	read_unlock(&sk->sk_callback_lock);
@@ -1732,7 +1758,7 @@ static void sock_def_error_report(struct sock *sk)
 static void sock_def_readable(struct sock *sk, int len)
 {
 	read_lock(&sk->sk_callback_lock);
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+	if (sk_has_sleeper(sk))
 		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -1747,7 +1773,7 @@ static void sock_def_write_space(struct sock *sk)
 	 * progress.  --DaveM
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
-		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		if (sk_has_sleeper(sk))
 			wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 
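Note: sk_has_sleeper() is the wakeup-side half of the barrier pairing
introduced with sock_poll_wait() in the datagram.c hunk above. A sketch
of the helper, assuming the include/net/sock.h definition added by this
series:

	static inline int sk_has_sleeper(struct sock *sk)
	{
		/*
		 * We need to be sure we are in sync with the
		 * add_wait_queue modifications to the wait queue;
		 * this barrier is paired with the one in
		 * sock_poll_wait().
		 */
		smp_mb();
		return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
	}

Without the barrier the waker could observe an empty wait queue even
though the poller had already queued itself, losing a wakeup.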
@@ -1840,6 +1866,11 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	/*
+	 * Before updating sk_refcnt, we must commit prior changes to memory
+	 * (Documentation/RCU/rculist_nulls.txt for details)
+	 */
+	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
 	atomic_set(&sk->sk_wmem_alloc, 1);
 	atomic_set(&sk->sk_drops, 0);