Diffstat (limited to 'net')
-rw-r--r--   net/bluetooth/hci_conn.c              5
-rw-r--r--   net/bluetooth/hci_event.c             2
-rw-r--r--   net/bluetooth/l2cap.c                14
-rw-r--r--   net/bridge/br_device.c                9
-rw-r--r--   net/bridge/br_forward.c              23
-rw-r--r--   net/bridge/br_multicast.c            21
-rw-r--r--   net/bridge/br_netfilter.c             3
-rw-r--r--   net/core/dev.c                       38
-rw-r--r--   net/core/ethtool.c                   41
-rw-r--r--   net/core/neighbour.c                  5
-rw-r--r--   net/dsa/Kconfig                       2
-rw-r--r--   net/ipv4/ipmr.c                       8
-rw-r--r--   net/ipv4/tcp.c                        1
-rw-r--r--   net/ipv4/tcp_output.c                 3
-rw-r--r--   net/ipv4/xfrm4_policy.c               2
-rw-r--r--   net/ipv6/mip6.c                       3
-rw-r--r--   net/ipv6/netfilter/ip6t_REJECT.c      6
-rw-r--r--   net/ipv6/xfrm6_policy.c               2
-rw-r--r--   net/phonet/pep.c                      1
-rw-r--r--   net/sched/act_nat.c                   5
-rw-r--r--   net/sunrpc/auth.c                     2
-rw-r--r--   net/xfrm/xfrm_policy.c               15
22 files changed, 139 insertions, 72 deletions
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b10e3cdb08f8..800b6b9fbbae 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 		acl->sec_level = sec_level;
 		acl->auth_type = auth_type;
 		hci_acl_connect(acl);
+	} else {
+		if (acl->sec_level < sec_level)
+			acl->sec_level = sec_level;
+		if (acl->auth_type < auth_type)
+			acl->auth_type = auth_type;
 	}
 
 	if (type == ACL_LINK)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6c57fc71c7e2..786b5de0bac4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	if (conn) {
 		if (!ev->status)
 			conn->link_mode |= HCI_LM_AUTH;
+		else
+			conn->sec_level = BT_SECURITY_LOW;
 
 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1b682a5aa061..cf3c4073a8a6 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
 	l2cap_send_sframe(pi, control);
 }
 
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 		return;
 
-	if (l2cap_check_security(sk)) {
+	if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
 		struct l2cap_conn_req req;
 		req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 		req.psm = l2cap_pi(sk)->psm;
 
 		l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 				L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 		}
 
 		if (sk->sk_state == BT_CONNECT) {
-			if (l2cap_check_security(sk)) {
+			if (l2cap_check_security(sk) &&
+					__l2cap_no_conn_pending(sk)) {
 				struct l2cap_conn_req req;
 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 				req.psm = l2cap_pi(sk)->psm;
 
 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 		l2cap_pi(sk)->ident = 0;
 		l2cap_pi(sk)->dcid = dcid;
 		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
 		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			req.psm = l2cap_pi(sk)->psm;
 
 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 				L2CAP_CONN_REQ, sizeof(req), &req);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..753fc4221f3c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
 	return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
-{
-	struct netpoll *np = br_dev->npinfo->netpoll;
-
-	if (np->real_dev != br_dev)
-		netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
-	.ndo_poll_controller	 = br_poll_controller,
 #endif
 };
 
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a4e72a89e4ff..595da45f9088 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 		kfree_skb(skb);
 	else {
 		skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-		if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-			netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-			skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-		} else
-#endif
-			dev_queue_xmit(skb);
+		dev_queue_xmit(skb);
 	}
 }
 
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct net_bridge *br = to->br;
-	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll *np;
-		to->dev->npinfo = skb->dev->npinfo;
-		np = skb->dev->npinfo->netpoll;
-		np->real_dev = np->dev = to->dev;
-		to->dev->priv_flags |= IFF_IN_NETPOLL;
-	}
-#endif
 	skb->dev = to->dev;
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (skb->dev->npinfo)
-		skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
 	return NULL;
 }
 
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
+	if (!mdb)
+		return NULL;
+
+	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
 static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	struct net_bridge_mdb_htable *mdb, __be32 dst)
 {
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	br_dst.u.ip4 = dst;
 	br_dst.proto = htons(ETH_P_IP);
 
-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 	ipv6_addr_copy(&br_dst.u.ip6, dst);
 	br_dst.proto = htons(ETH_P_IPV6);
 
-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }
 #endif
 
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
-	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
-{
-	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
-}
-
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
 	struct net_bridge_mdb_htable *mdb = br->mdb;
 	struct br_ip ip;
 
-	if (!mdb || br->multicast_disabled)
+	if (br->multicast_disabled)
 		return NULL;
 
 	if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
 
 	pskb_trim_rcsum(skb, len);
 
+	/* BUG: Should really parse the IP options here. */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..0ea10f849be8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+
+	if (unlikely(txq > dev->num_tx_queues))
+		;
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		qdisc_reset_all_tx_gt(dev, txq);
+	}
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
 
 static inline void __netif_reschedule(struct Qdisc *q)
 {
@@ -1893,8 +1911,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-	if (!skb_tx(skb)->flags)
+	struct sock *sk = skb->sk;
+
+	if (sk && !skb_tx(skb)->flags) {
+		/* skb_tx_hash() wont be able to get sk.
+		 * We copy sk_hash into skb->rxhash
+		 */
+		if (!skb->rxhash)
+			skb->rxhash = sk->sk_hash;
 		skb_orphan(skb);
+	}
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1980,8 +2006,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol;
-
+		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2004,12 +2029,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	u16 queue_index;
+	int queue_index;
 	struct sock *sk = skb->sk;
 
-	if (sk_tx_queue_recorded(sk)) {
-		queue_index = sk_tx_queue_get(sk);
-	} else {
+	queue_index = sk_tx_queue_get(sk);
+	if (queue_index < 0) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue) {
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
 }
 
 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
-	struct ethtool_rxnfc cmd;
+	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 
 	if (!dev->ethtool_ops->set_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_SRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
-	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+	return dev->ethtool_ops->set_rxnfc(dev, &info);
 }
 
 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
 	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 	int ret;
 	void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 	if (!ops->get_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&info, useraddr, sizeof(info)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_GRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
 	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
 		if (info.rule_cnt > 0) {
-			rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
-					   GFP_USER);
+			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+				rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+						   GFP_USER);
 			if (!rule_buf)
 				return -ENOMEM;
 		}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 		goto err_out;
 
 	ret = -EFAULT;
-	if (copy_to_user(useraddr, &info, sizeof(info)))
+	if (copy_to_user(useraddr, &info, info_size))
 		goto err_out;
 
 	if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
-		rc = ethtool_get_rxnfc(dev, useraddr);
+		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_SRXFH:
 	case ETHTOOL_SRXCLSRLDEL:
 	case ETHTOOL_SRXCLSRLINS:
-		rc = ethtool_set_rxnfc(dev, useraddr);
+		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_GGRO:
 		rc = ethtool_get_gro(dev, useraddr);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6ba1c0eece03..a4e0a7482c2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
 	struct hh_cache *hh;
 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-		= neigh->dev->header_ops->cache_update;
+		= NULL;
+
+	if (neigh->dev->header_ops)
+		update = neigh->dev->header_ops->cache_update;
 
 	if (update) {
 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c51b55400dc5..11201784d29a 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
 	bool "Distributed Switch Architecture support"
 	default n
-	depends on EXPERIMENTAL && !S390
+	depends on EXPERIMENTAL && NET_ETHERNET && !S390
 	select PHYLIB
 	---help---
 	  This allows you to use hardware switch chips that use
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 757f25eb9b4b..7f6273506eea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -442,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 	int err;
 
 	err = ipmr_fib_lookup(net, &fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
@@ -1728,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
 		goto dont_forward;
 
 	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	if (!local) {
 		if (IPCB(skb)->opt.router_alert) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..65afeaec15b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 	ssize_t spliced;
 	int ret;
 
+	sock_rps_record_flow(sk);
 	/*
 	 * We can't seek on a socket input
 	 */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..7ed9dc1042d1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	int mib_idx;
 	int fwd_rexmitting = 0;
 
+	if (!tp->packets_out)
+		return;
+
 	if (!tp->lost_out)
 		tp->retransmit_high = tp->snd_una;
 
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
 
 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 2794b6002836..d6e9599d0705 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
 	int err = rt2->rt_hdr.nexthdr;
 
 	spin_lock(&x->lock);
-	if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+	if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
 	    !ipv6_addr_any((struct in6_addr *)x->coaddr))
 		err = -ENOENT;
 	spin_unlock(&x->lock);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 	fl.fl_ip_dport = otcph.source;
 	security_skb_classify_flow(oldskb, &fl);
 	dst = ip6_route_output(net, NULL, &fl);
-	if (dst == NULL)
+	if (dst == NULL || dst->error) {
+		dst_release(dst);
 		return;
-	if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+	}
+	if (xfrm_lookup(net, &dst, &fl, NULL, 0))
 		return;
 
 	hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];
 
 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
 	ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 94d72e85a475..b2a3ae6cad78 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 		newsk = NULL;
 		goto out;
 	}
+	kfree_skb(oskb);
 
 	sock_hold(sk);
 	pep_sk(newsk)->listener = sk;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 570949417f38..724553e8ed7b 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	{
 		struct icmphdr *icmph;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		    (icmph->type != ICMP_PARAMETERPROB))
 			break;
 
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+			goto drop;
+
 		iph = (void *)(icmph + 1);
 		if (egress)
 			addr = iph->daddr;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 73affb8624fa..8dc47f1d0001 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -267,7 +267,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
  * Run memory cache shrinker.
  */
 static int
-rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free);
 	int res;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index af1c173be4ad..a7ec5a8a2380 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	/* Try to instantiate a bundle */
 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-	if (err < 0) {
-		if (err != -EAGAIN)
+	if (err <= 0) {
+		if (err != 0 && err != -EAGAIN)
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
 		return ERR_PTR(err);
 	}
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
 			goto make_dummy_bundle;
 		dst_hold(&xdst->u.dst);
 		return oldflo;
+	} else if (new_xdst == NULL) {
+		num_xfrms = 0;
+		if (oldflo == NULL)
+			goto make_dummy_bundle;
+		xdst->num_xfrms = 0;
+		dst_hold(&xdst->u.dst);
+		return oldflo;
 	}
 
 	/* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
 		xfrm_pols_put(pols, num_pols);
 		err = PTR_ERR(xdst);
 		goto dropdst;
+	} else if (xdst == NULL) {
+		num_xfrms = 0;
+		drop_pols = num_pols;
+		goto no_transform;
 	}
 
 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);