author		David S. Miller <davem@davemloft.net>	2017-10-22 08:36:53 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-22 08:39:14 -0400
commit		f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2 (patch)
tree		0a6432aba336bae42313613f4c891bcfce02bd4e /net/core
parent		bdd091bab8c631bd2801af838e344fad34566410 (diff)
parent		b5ac3beb5a9f0ef0ea64cd85faf94c0dc4de0e42 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.

Daniel's bug fix for off-by-ones in the new BPF branch instructions, along with the added allowances for "data_end > ptr + x" forms, collided with the metadata additions.

Along with those three changes came verifier test cases, which in their final form I tried to group together properly. If I had just trimmed GIT's conflict tags as-is, this would have split up the meta tests unnecessarily.

In the socketmap code, a set of preemption disabling changes overlapped with the rename of bpf_compute_data_end() to bpf_compute_data_pointers().

Changes were made to the mv88e6060.c driver's set-addr method, which got removed in net-next.

The hyperv transport socket layer had a locking change in 'net' which overlapped with a change of socket state macro usage in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
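For readers who have not resolved a merge by hand: the "conflict tags" mentioned above are the markers git leaves wherever both branches touched the same region, schematically:

<<<<<<< HEAD
line as it exists in the branch being merged into
=======
line as it exists in the branch being merged in
>>>>>>> net

Trimming the tags as-is keeps both sides in whatever order they happened to collide, which in this merge would have interleaved the metadata test cases with the branch-instruction test cases; resolving by hand is what allowed the related verifier tests to be grouped together.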
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c              6
-rw-r--r--	net/core/dev_ioctl.c       13
-rw-r--r--	net/core/ethtool.c          5
-rw-r--r--	net/core/filter.c          32
-rw-r--r--	net/core/rtnetlink.c       13
-rw-r--r--	net/core/skbuff.c           6
-rw-r--r--	net/core/sock.c             8
-rw-r--r--	net/core/sock_reuseport.c  12

8 files changed, 63 insertions(+), 32 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index cf5894f0e6eb..24ac9083bc13 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1149,9 +1149,8 @@ static int dev_alloc_name_ns(struct net *net,
 	return ret;
 }
 
-static int dev_get_valid_name(struct net *net,
-			      struct net_device *dev,
-			      const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+		       const char *name)
 {
 	BUG_ON(!net);
 
@@ -1167,6 +1166,7 @@ static int dev_get_valid_name(struct net *net,
 
 	return 0;
 }
+EXPORT_SYMBOL(dev_get_valid_name);
 
 /**
  * dev_change_name - change name of a device
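The dev.c change drops 'static' from dev_get_valid_name() and adds EXPORT_SYMBOL(), turning a file-local helper into one callable from the rest of the kernel and from loadable modules. A loose userspace sketch of the same linkage change (the function name and the validation rule here are hypothetical, not the kernel's):

#include <string.h>

/* was: static int name_is_valid(...) -- now external linkage; the
 * declaration below would normally live in a shared header */
int name_is_valid(const char *name);

int name_is_valid(const char *name)
{
	/* stand-in check; the kernel's dev_valid_name() is stricter */
	return name && *name && !strchr(name, '/');
}

External linkage alone covers built-in callers; EXPORT_SYMBOL() is the extra step that records the symbol in the kernel's symbol table so modules can resolve it at load time.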
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 709a4e6fb447..f9c7a88cd981 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 	case SIOCSIFTXQLEN:
 		if (ifr->ifr_qlen < 0)
 			return -EINVAL;
-		dev->tx_queue_len = ifr->ifr_qlen;
+		if (dev->tx_queue_len ^ ifr->ifr_qlen) {
+			unsigned int orig_len = dev->tx_queue_len;
+
+			dev->tx_queue_len = ifr->ifr_qlen;
+			err = call_netdevice_notifiers(
+					NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+			err = notifier_to_errno(err);
+			if (err) {
+				dev->tx_queue_len = orig_len;
+				return err;
+			}
+		}
 		return 0;
 
 	case SIOCSIFNAME:
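The rewritten SIOCSIFTXQLEN case follows the common apply/notify/rollback shape: commit the new value, run the notifier chain, and restore the saved value if any subscriber vetoes. A minimal userspace sketch of that shape (the notifier and its policy are invented for illustration):

/* hypothetical veto-able notifier: 0 accepts the change, nonzero vetoes */
static int notify_qlen_change(unsigned int new_len)
{
	return new_len > 10000;			/* stand-in policy */
}

static unsigned int tx_queue_len = 1000;

static int set_tx_queue_len(unsigned int qlen)
{
	if (tx_queue_len != qlen) {		/* the hunk tests this with ^ */
		unsigned int orig_len = tx_queue_len;

		tx_queue_len = qlen;		/* apply first, so listeners */
		if (notify_qlen_change(qlen)) {	/* observe the new value    */
			tx_queue_len = orig_len;/* roll back on veto */
			return -1;
		}
	}
	return 0;
}

The do_setlink() hunk in rtnetlink.c below applies the same orig_len rollback to the same field on the netlink path.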
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0c406306792a..f8fcf450a36e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -452,7 +452,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
 
 /* return false if legacy contained non-0 deprecated fields
- * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
+ * maxtxpkt/maxrxpkt. rest of ksettings always updated
  */
 static bool
 convert_legacy_settings_to_link_ksettings(
@@ -467,8 +467,7 @@ convert_legacy_settings_to_link_ksettings(
 	 * deprecated legacy fields, and they should not use
 	 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
 	 */
-	if (legacy_settings->transceiver ||
-	    legacy_settings->maxtxpkt ||
+	if (legacy_settings->maxtxpkt ||
 	    legacy_settings->maxrxpkt)
 		retval = false;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index ccf62f44140a..b79c44cc8145 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1840,31 +1840,31 @@ static const struct bpf_func_proto bpf_redirect_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+	   struct bpf_map *, map, u32, key, u64, flags)
 {
-	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
 	if (unlikely(flags))
 		return SK_ABORTED;
 
-	ri->ifindex = key;
-	ri->flags = flags;
-	ri->map = map;
+	tcb->bpf.key = key;
+	tcb->bpf.flags = flags;
+	tcb->bpf.map = map;
 
 	return SK_REDIRECT;
 }
 
-struct sock *do_sk_redirect_map(void)
+struct sock *do_sk_redirect_map(struct sk_buff *skb)
 {
-	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 	struct sock *sk = NULL;
 
-	if (ri->map) {
-		sk = __sock_map_lookup_elem(ri->map, ri->ifindex);
+	if (tcb->bpf.map) {
+		sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
 
-		ri->ifindex = 0;
-		ri->map = NULL;
-		/* we do not clear flags for future lookup */
+		tcb->bpf.key = 0;
+		tcb->bpf.map = NULL;
 	}
 
 	return sk;
@@ -1874,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
 	.func		= bpf_sk_redirect_map,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_CONST_MAP_PTR,
-	.arg2_type	= ARG_ANYTHING,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_ANYTHING,
 };
 
 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3902,7 +3903,6 @@ static bool sk_skb_is_valid_access(int off, int size,
 
 	if (type == BPF_WRITE) {
 		switch (off) {
-		case bpf_ctx_range(struct __sk_buff, mark):
 		case bpf_ctx_range(struct __sk_buff, tc_index):
 		case bpf_ctx_range(struct __sk_buff, priority):
 			break;
@@ -3912,6 +3912,8 @@ static bool sk_skb_is_valid_access(int off, int size,
 	}
 
 	switch (off) {
+	case bpf_ctx_range(struct __sk_buff, mark):
+		return false;
 	case bpf_ctx_range(struct __sk_buff, data):
 		info->reg_type = PTR_TO_PACKET;
 		break;
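The socketmap change moves the redirect state out of a per-CPU scratch structure (redirect_info) and into the skb's control-buffer region via TCP_SKB_CB(), so the map/key pair travels with the packet rather than with the CPU; per the merge description above, this is what collided with (and ultimately replaced the need for) the preemption-disabling changes. A toy model of packet-carried scratch state, with invented types and names:

#include <stddef.h>

struct pkt {
	unsigned char cb[48];		/* per-packet scratch, like skb->cb */
};

struct redirect_state {
	void *map;
	unsigned int key;
};

/* view the packet's cb area as redirect state, as TCP_SKB_CB() does */
static struct redirect_state *pkt_redirect(struct pkt *p)
{
	return (struct redirect_state *)p->cb;
}

static void set_redirect(struct pkt *p, void *map, unsigned int key)
{
	/* the state lives in the packet, so preemption or CPU migration
	 * between setting and consuming it cannot mix up two packets */
	pkt_redirect(p)->map = map;
	pkt_redirect(p)->key = key;
}

With a this_cpu_ptr() slot, the state is only safe while setter and consumer stay on one CPU with preemption off; keying it to the packet removes that constraint.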
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20b550d07fe3..04680a53c8dd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1551,7 +1551,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
 	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
 	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
-	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
+	/* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
+	 * allow 0-length string (needed to remove an alias).
+	 */
+	[IFLA_IFALIAS]		= { .type = NLA_BINARY, .len = IFALIASZ - 1 },
 	[IFLA_VFINFO_LIST]	= {. type = NLA_NESTED },
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
@@ -2172,7 +2175,7 @@ static int do_setlink(const struct sk_buff *skb,
 			dev->tx_queue_len = orig_len;
 			goto errout;
 		}
-		status |= DO_SETLINK_NOTIFY;
+		status |= DO_SETLINK_MODIFIED;
 	}
 }
 
@@ -2332,7 +2335,7 @@ static int do_setlink(const struct sk_buff *skb,
 
 errout:
 	if (status & DO_SETLINK_MODIFIED) {
-		if (status & DO_SETLINK_NOTIFY)
+		if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
 			netdev_state_change(dev);
 
 		if (err < 0)
@@ -4373,13 +4376,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
 
 	switch (event) {
 	case NETDEV_REBOOT:
+	case NETDEV_CHANGEMTU:
 	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	case NETDEV_BONDING_FAILOVER:
+	case NETDEV_POST_TYPE_CHANGE:
 	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_CHANGEUPPER:
 	case NETDEV_RESEND_IGMP:
 	case NETDEV_CHANGEINFODATA:
+	case NETDEV_CHANGE_TX_QUEUE_LEN:
 		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
 				   GFP_KERNEL, NULL);
 		break;
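The (status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY form reads oddly until you know the flags overlap: in this tree DO_SETLINK_NOTIFY carries the DO_SETLINK_MODIFIED bit (0x1 and 0x3 respectively, assuming the definitions in rtnetlink.c at the time), so a plain & test is also true for a link that was merely modified. A small self-contained check of the difference:

#include <assert.h>

#define DO_SETLINK_MODIFIED	0x1
#define DO_SETLINK_NOTIFY	0x3	/* notify implies modified */

int main(void)
{
	unsigned int status = DO_SETLINK_MODIFIED;	/* modified only */

	/* old test: true, because NOTIFY shares MODIFIED's bit */
	assert(status & DO_SETLINK_NOTIFY);

	/* new test: false, both bits must be set to mean "notify" */
	assert((status & DO_SETLINK_NOTIFY) != DO_SETLINK_NOTIFY);
	return 0;
}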
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 40717501cbdd..97e604d55d55 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 
 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
+		struct sock *save_sk = skb->sk;
+
 		/* Streams do not free skb on error. Reset to prev state. */
 		msg->msg_iter = orig_iter;
+		skb->sk = sk;
 		___pskb_trim(skb, orig_len);
+		skb->sk = save_sk;
 		return err;
 	}
 
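The save_sk juggling above appears to exist because the trim's per-socket memory accounting needs skb->sk to point at the owning socket, and on this error path the skb may not be attached to sk; the fix lends skb->sk to ___pskb_trim() for the duration and then restores whatever was there. A generic sketch of temporarily lending an owner so a callee's bookkeeping lands on the right account (names and types hypothetical):

#include <stddef.h>

struct owner { long charged; };

struct buf {
	struct owner *owner;
	size_t len;
};

static void trim(struct buf *b, size_t new_len)
{
	if (b->owner)				/* uncharge what we free */
		b->owner->charged -= (long)(b->len - new_len);
	b->len = new_len;
}

static void trim_on_behalf_of(struct buf *b, struct owner *o, size_t new_len)
{
	struct owner *saved = b->owner;		/* like save_sk above */

	b->owner = o;				/* lend ownership ... */
	trim(b, new_len);			/* ... so accounting hits o */
	b->owner = saved;			/* reset to prev state */
}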
@@ -1895,7 +1899,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	}
 
 	/* If we need update frag list, we are in troubles.
-	 * Certainly, it possible to add an offset to skb data,
+	 * Certainly, it is possible to add an offset to skb data,
 	 * but taking into account that pulling is expected to
 	 * be very rare operation, it is worth to fight against
 	 * further bloating skb head and crucify ourselves here instead.
diff --git a/net/core/sock.c b/net/core/sock.c
index 35656a9e4e44..759400053110 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_dst_pending_confirm = 0;
 		newsk->sk_wmem_queued	= 0;
 		newsk->sk_forward_alloc = 0;
+
+		/* sk->sk_memcg will be populated at accept() time */
+		newsk->sk_memcg = NULL;
+
 		atomic_set(&newsk->sk_drops, 0);
 		newsk->sk_send_head	= NULL;
 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 		atomic_set(&newsk->sk_zckey, 0);
 
 		sock_reset_flag(newsk, SOCK_DONE);
+		cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
 		rcu_read_lock();
 		filter = rcu_dereference(sk->sk_filter);
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
 
-		mem_cgroup_sk_alloc(newsk);
-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
 		/*
 		 * Before updating sk_refcnt, we must commit prior changes to memory
 		 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
 	 * soft irq of receive path or setsockopt from process context
 	 */
 	spin_lock_bh(&reuseport_lock);
-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
-					    lockdep_is_held(&reuseport_lock)),
-		  "multiple allocations for the same socket");
+
+	/* Allocation attempts can occur concurrently via the setsockopt path
+	 * and the bind/hash path.  Nothing to do when we lose the race.
+	 */
+	if (rcu_dereference_protected(sk->sk_reuseport_cb,
+				      lockdep_is_held(&reuseport_lock)))
+		goto out;
+
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
 		spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
 	reuse->num_socks = 1;
 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
 
+out:
 	spin_unlock_bh(&reuseport_lock);
 
 	return 0;
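The reuseport change converts "this should never happen" (WARN_ONCE) into a benign outcome: two paths can legitimately race to allocate the same structure, so the loser simply observes the winner's pointer under the lock and backs out. A compact pthread sketch of the same allocate-unless-someone-beat-us idiom, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct state { int socks; } *shared;	/* like sk->sk_reuseport_cb */

int state_alloc(void)
{
	pthread_mutex_lock(&lock);

	/* allocation attempts can occur concurrently; nothing to do
	 * when we lose the race -- mirroring the kernel comment above */
	if (shared)
		goto out;

	shared = calloc(1, sizeof(*shared));
	if (!shared) {
		pthread_mutex_unlock(&lock);
		return -1;
	}
	shared->socks = 1;
out:
	pthread_mutex_unlock(&lock);
	return 0;
}

Checking the pointer only after taking the lock is what makes the test race-free: whoever wins the lock either sees NULL and installs the object, or sees the winner's pointer and leaves quietly.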