path: root/net/core
author    Brian Norris <computersforpeace@gmail.com>  2017-06-01 13:53:55 -0400
committer Brian Norris <computersforpeace@gmail.com>  2017-06-01 13:53:55 -0400
commit    05e97a9eda72d58dba293857df6aac62584ef99a (patch)
tree      e86e692f26d4879ff2210c54722e2b7780210249 /net/core
parent    2ea659a9ef488125eb46da6eb571de5eae5c43f6 (diff)
parent    d4ed3b9015b5eebc90d629579d9e7944607cbae5 (diff)
Merge tag 'nand/fixes-for-4.12-rc3' of git://git.infradead.org/linux-mtd into MTD
From Boris:
"""
This pull request contains several fixes to the core and the tango driver.

tango fixes:
* Add missing MODULE_DEVICE_TABLE() in tango_nand.c
* Update the number of corrected bitflips

core fixes:
* Fix a long standing memory leak in nand_scan_tail()
* Fix several bugs introduced by the per-vendor init/detection infrastructure
  (introduced in 4.12)
* Add a static specifier to nand_ooblayout_lp_hamming_ops definition
"""
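One of the tango fixes above is the addition of a missing MODULE_DEVICE_TABLE() entry. As a hedged illustration only (the identifiers below are made up and are not taken from tango_nand.c), an OF-based NAND driver usually exposes its compatible strings like this so modprobe can autoload the module when a matching device-tree node appears:

#include <linux/module.h>
#include <linux/of.h>

/* Hypothetical compatible-string table, for illustration only; the real
 * fix simply adds the MODULE_DEVICE_TABLE() line for the table that
 * tango_nand.c already defines.
 */
static const struct of_device_id example_nand_ids[] = {
	{ .compatible = "vendor,example-nand" },
	{ /* sentinel */ }
};

/* Emits the module alias information userspace needs to autoload the
 * driver for devices matching the table above.
 */
MODULE_DEVICE_TABLE(of, example_nand_ids);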
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c         57
-rw-r--r--   net/core/neighbour.c   14
-rw-r--r--   net/core/rtnetlink.c   81
-rw-r--r--   net/core/sock.c        23
4 files changed, 103 insertions, 72 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 96cf83da0d66..fca407b4a6ea 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6852,6 +6852,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
+bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_QUERY_PROG;
+
+	/* Query must always succeed. */
+	WARN_ON(xdp_op(dev, &xdp) < 0);
+	return xdp.prog_attached;
+}
+
+static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
+			   struct netlink_ext_ack *extack,
+			   struct bpf_prog *prog)
+{
+	struct netdev_xdp xdp;
+
+	memset(&xdp, 0, sizeof(xdp));
+	xdp.command = XDP_SETUP_PROG;
+	xdp.extack = extack;
+	xdp.prog = prog;
+
+	return xdp_op(dev, &xdp);
+}
+
 /**
  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
  *	@dev: device
@@ -6864,41 +6890,34 @@ EXPORT_SYMBOL(dev_change_proto_down);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags)
 {
-	int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct bpf_prog *prog = NULL;
-	struct netdev_xdp xdp;
+	xdp_op_t xdp_op, xdp_chk;
 	int err;
 
 	ASSERT_RTNL();
 
-	xdp_op = ops->ndo_xdp;
+	xdp_op = xdp_chk = ops->ndo_xdp;
+	if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
+		return -EOPNOTSUPP;
 	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
 		xdp_op = generic_xdp_install;
+	if (xdp_op == xdp_chk)
+		xdp_chk = generic_xdp_install;
 
 	if (fd >= 0) {
-		if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
-			memset(&xdp, 0, sizeof(xdp));
-			xdp.command = XDP_QUERY_PROG;
-
-			err = xdp_op(dev, &xdp);
-			if (err < 0)
-				return err;
-			if (xdp.prog_attached)
-				return -EBUSY;
-		}
+		if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
+			return -EEXIST;
+		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
+		    __dev_xdp_attached(dev, xdp_op))
+			return -EBUSY;
 
 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
 		if (IS_ERR(prog))
 			return PTR_ERR(prog);
 	}
 
-	memset(&xdp, 0, sizeof(xdp));
-	xdp.command = XDP_SETUP_PROG;
-	xdp.extack = extack;
-	xdp.prog = prog;
-
-	err = xdp_op(dev, &xdp);
+	err = dev_xdp_install(dev, xdp_op, extack, prog);
 	if (err < 0 && prog)
 		bpf_prog_put(prog);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 58b0bcc125b5..d274f81fcc2c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		lladdr = neigh->ha;
 	}
 
-	if (new & NUD_CONNECTED)
-		neigh->confirmed = jiffies;
-	neigh->updated = jiffies;
-
 	/* If entry was valid and address is not changed,
 	   do not change entry state, if new one is STALE.
 	 */
@@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		}
 	}
 
+	/* Update timestamps only once we know we will make a change to the
+	 * neighbour entry. Otherwise we risk to move the locktime window with
+	 * noop updates and ignore relevant ARP updates.
+	 */
+	if (new != old || lladdr != neigh->ha) {
+		if (new & NUD_CONNECTED)
+			neigh->confirmed = jiffies;
+		neigh->updated = jiffies;
+	}
+
 	if (new != old) {
 		neigh_del_timer(neigh);
 		if (new & NUD_PROBE)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bcb0f610ee42..49a279a7cc15 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev,
 static size_t rtnl_xdp_size(void)
 {
 	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
-			  nla_total_size(1) +	/* XDP_ATTACHED */
-			  nla_total_size(4);	/* XDP_FLAGS */
+			  nla_total_size(1);	/* XDP_ATTACHED */
 
 	return xdp_size;
 }
@@ -1247,37 +1246,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+static u8 rtnl_xdp_attached_mode(struct net_device *dev)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	ASSERT_RTNL();
+
+	if (rcu_access_pointer(dev->xdp_prog))
+		return XDP_ATTACHED_SKB;
+	if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
+		return XDP_ATTACHED_DRV;
+
+	return XDP_ATTACHED_NONE;
+}
+
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
 {
 	struct nlattr *xdp;
-	u32 xdp_flags = 0;
-	u8 val = 0;
 	int err;
 
 	xdp = nla_nest_start(skb, IFLA_XDP);
 	if (!xdp)
 		return -EMSGSIZE;
-	if (rcu_access_pointer(dev->xdp_prog)) {
-		xdp_flags = XDP_FLAGS_SKB_MODE;
-		val = 1;
-	} else if (dev->netdev_ops->ndo_xdp) {
-		struct netdev_xdp xdp_op = {};
-
-		xdp_op.command = XDP_QUERY_PROG;
-		err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
-		if (err)
-			goto err_cancel;
-		val = xdp_op.prog_attached;
-	}
-	err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val);
+
+	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
+			 rtnl_xdp_attached_mode(dev));
 	if (err)
 		goto err_cancel;
 
-	if (xdp_flags) {
-		err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags);
-		if (err)
-			goto err_cancel;
-	}
 	nla_nest_end(skb, xdp);
 	return 0;
 
@@ -1631,13 +1627,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 					       cb->nlh->nlmsg_seq, 0,
 					       flags,
 					       ext_filter_mask);
-			/* If we ran out of room on the first message,
-			 * we're in trouble
-			 */
-			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-			if (err < 0)
-				goto out;
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
@@ -1645,10 +1641,12 @@ cont:
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
@@ -2199,6 +2197,11 @@ static int do_setlink(const struct sk_buff *skb,
 			err = -EINVAL;
 			goto errout;
 		}
+		if ((xdp_flags & XDP_FLAGS_SKB_MODE) &&
+		    (xdp_flags & XDP_FLAGS_DRV_MODE)) {
+			err = -EINVAL;
+			goto errout;
+		}
 	}
 
 	if (xdp[IFLA_XDP_FD]) {
@@ -3452,8 +3455,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3464,16 +3471,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 							      seq, dev,
 							      filter_mask,
 							      NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
 	}
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 	cb->args[0] = idx;
 
-	return skb->len;
+	return err;
 }
 
 static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index 79c6aee6af9b..727f924b7f91 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,10 +139,7 @@
 
 #include <trace/events/sock.h>
 
-#ifdef CONFIG_INET
 #include <net/tcp.h>
-#endif
-
 #include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
@@ -1803,28 +1800,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
  * But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
  */
 void skb_orphan_partial(struct sk_buff *skb)
 {
-	/* If this skb is a TCP pure ACK or already went here,
-	 * we have nothing to do. 2 is already a very small truesize.
-	 */
-	if (skb->truesize <= 2)
+	if (skb_is_tcp_pure_ack(skb))
 		return;
 
-	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
-	 * so we do not completely orphan skb, but transfert all
-	 * accounted bytes but one, to avoid unexpected reorders.
-	 */
 	if (skb->destructor == sock_wfree
 #ifdef CONFIG_INET
 	    || skb->destructor == tcp_wfree
 #endif
 	    ) {
-		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
-		skb->truesize = 1;
+		struct sock *sk = skb->sk;
+
+		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			skb->destructor = sock_efree;
+		}
 	} else {
 		skb_orphan(skb);
 	}