Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c             |  4
-rw-r--r--  net/8021q/vlan_dev.c              |  3
-rw-r--r--  net/bluetooth/bnep/netdev.c       |  2
-rw-r--r--  net/bluetooth/hci_conn.c          |  5
-rw-r--r--  net/bluetooth/hci_event.c         |  2
-rw-r--r--  net/bluetooth/l2cap.c             | 14
-rw-r--r--  net/bridge/br_device.c            |  9
-rw-r--r--  net/bridge/br_fdb.c               |  6
-rw-r--r--  net/bridge/br_forward.c           | 27
-rw-r--r--  net/bridge/br_multicast.c         | 21
-rw-r--r--  net/bridge/br_netfilter.c         |  3
-rw-r--r--  net/caif/cfrfml.c                 |  2
-rw-r--r--  net/caif/cfserl.c                 |  6
-rw-r--r--  net/caif/cfveil.c                 |  2
-rw-r--r--  net/core/dev.c                    | 72
-rw-r--r--  net/core/ethtool.c                | 41
-rw-r--r--  net/core/gen_estimator.c          | 15
-rw-r--r--  net/core/neighbour.c              |  5
-rw-r--r--  net/core/pktgen.c                 |  2
-rw-r--r--  net/core/skbuff.c                 | 51
-rw-r--r--  net/dsa/Kconfig                   |  2
-rw-r--r--  net/ipv4/Kconfig                  | 10
-rw-r--r--  net/ipv4/ip_output.c              |  9
-rw-r--r--  net/ipv4/ipmr.c                   | 12
-rw-r--r--  net/ipv4/netfilter/ip_tables.c    |  2
-rw-r--r--  net/ipv4/syncookies.c             |  2
-rw-r--r--  net/ipv4/tcp.c                    |  1
-rw-r--r--  net/ipv4/tcp_hybla.c              |  4
-rw-r--r--  net/ipv4/tcp_input.c              |  4
-rw-r--r--  net/ipv4/tcp_ipv4.c               |  7
-rw-r--r--  net/ipv4/tcp_output.c             |  3
-rw-r--r--  net/ipv4/udp.c                    |  4
-rw-r--r--  net/ipv4/xfrm4_policy.c           |  2
-rw-r--r--  net/ipv6/addrconf.c               | 14
-rw-r--r--  net/ipv6/icmp.c                   |  4
-rw-r--r--  net/ipv6/ip6mr.c                  |  6
-rw-r--r--  net/ipv6/mcast.c                  |  5
-rw-r--r--  net/ipv6/mip6.c                   |  3
-rw-r--r--  net/ipv6/ndisc.c                  |  2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c   |  2
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c  |  6
-rw-r--r--  net/ipv6/route.c                  |  2
-rw-r--r--  net/ipv6/xfrm6_policy.c           |  2
-rw-r--r--  net/mac80211/agg-tx.c             |  6
-rw-r--r--  net/mac80211/cfg.c                |  2
-rw-r--r--  net/mac80211/chan.c               |  2
-rw-r--r--  net/mac80211/driver-ops.h         |  2
-rw-r--r--  net/mac80211/mlme.c               | 92
-rw-r--r--  net/mac80211/rx.c                 | 16
-rw-r--r--  net/mac80211/work.c               |  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c   |  4
-rw-r--r--  net/netfilter/x_tables.c          | 17
-rw-r--r--  net/phonet/pep.c                  |  7
-rw-r--r--  net/rds/ib_cm.c                   |  1
-rw-r--r--  net/rds/iw_cm.c                   |  1
-rw-r--r--  net/sched/act_mirred.c            | 43
-rw-r--r--  net/sched/act_nat.c               |  9
-rw-r--r--  net/sched/act_pedit.c             | 24
-rw-r--r--  net/sched/cls_u32.c               | 49
-rw-r--r--  net/sched/sch_teql.c              |  1
-rw-r--r--  net/sunrpc/auth.c                 |  2
-rw-r--r--  net/sunrpc/xprtsock.c             | 38
-rw-r--r--  net/xfrm/xfrm_output.c            |  4
-rw-r--r--  net/xfrm/xfrm_policy.c            | 19
64 files changed, 519 insertions, 222 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev)
 	netif_carrier_off(dev);
 
 	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
-	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+	dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					 IFF_MASTER | IFF_SLAVE);
 	dev->iflink = real_dev->ifindex;
 	dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
 					 (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc4..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 			break;
 		memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
 		memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+
+		i++;
 	}
 	r->len = htons(skb->len - len);
 }
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b10e3cdb08f8..800b6b9fbbae 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 		acl->sec_level = sec_level;
 		acl->auth_type = auth_type;
 		hci_acl_connect(acl);
+	} else {
+		if (acl->sec_level < sec_level)
+			acl->sec_level = sec_level;
+		if (acl->auth_type < auth_type)
+			acl->auth_type = auth_type;
 	}
 
 	if (type == ACL_LINK)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6c57fc71c7e2..786b5de0bac4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	if (conn) {
 		if (!ev->status)
 			conn->link_mode |= HCI_LM_AUTH;
+		else
+			conn->sec_level = BT_SECURITY_LOW;
 
 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1b682a5aa061..cf3c4073a8a6 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
 	l2cap_send_sframe(pi, control);
 }
 
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 		return;
 
-	if (l2cap_check_security(sk)) {
+	if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
 		struct l2cap_conn_req req;
 		req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 		req.psm = l2cap_pi(sk)->psm;
 
 		l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 				L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 		}
 
 		if (sk->sk_state == BT_CONNECT) {
-			if (l2cap_check_security(sk)) {
+			if (l2cap_check_security(sk) &&
+					__l2cap_no_conn_pending(sk)) {
 				struct l2cap_conn_req req;
 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 				req.psm = l2cap_pi(sk)->psm;
 
 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	l2cap_pi(sk)->ident = 0;
 	l2cap_pi(sk)->dcid = dcid;
 	l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
 	l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			req.psm = l2cap_pi(sk)->psm;
 
 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..753fc4221f3c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
 	return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
-{
-	struct netpoll *np = br_dev->npinfo->netpoll;
-
-	if (np->real_dev != br_dev)
-		netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_do_ioctl = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_netpoll_cleanup = br_netpoll_cleanup,
-	.ndo_poll_controller = br_poll_controller,
 #endif
 };
 
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965b..b01dde35a69e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
 {
 	struct net_bridge *br = (struct net_bridge *)_data;
 	unsigned long delay = hold_time(br);
-	unsigned long next_timer = jiffies + br->forward_delay;
+	unsigned long next_timer = jiffies + br->ageing_time;
 	int i;
 
 	spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
 	}
 	spin_unlock_bh(&br->hash_lock);
 
-	/* Add HZ/4 to ensure we round the jiffies upwards to be after the next
-	 * timer, otherwise we might round down and will have no-op run. */
-	mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+	mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
 }
 
 /* Completely flush all dynamic entries in forwarding database.*/
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef1393097..595da45f9088 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 		kfree_skb(skb);
 	else {
 		skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-		if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-			netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-			skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-		} else
-#endif
-			dev_queue_xmit(skb);
+		dev_queue_xmit(skb);
 	}
 }
 
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct net_bridge *br = to->br;
-	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll *np;
-		to->dev->npinfo = skb->dev->npinfo;
-		np = skb->dev->npinfo->netpoll;
-		np->real_dev = np->dev = to->dev;
-		to->dev->priv_flags |= IFF_IN_NETPOLL;
-	}
-#endif
 	skb->dev = to->dev;
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (skb->dev->npinfo)
-		skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -140,10 +119,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
 		      void (*__packet_hook)(const struct net_bridge_port *p,
 					    struct sk_buff *skb))
 {
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (!skb) {
-		struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
 		dev->stats.tx_dropped++;
 		return -ENOMEM;
 	}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
 	return NULL;
 }
 
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
+	if (!mdb)
+		return NULL;
+
+	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
 static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	struct net_bridge_mdb_htable *mdb, __be32 dst)
 {
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	br_dst.u.ip4 = dst;
 	br_dst.proto = htons(ETH_P_IP);
 
-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 	ipv6_addr_copy(&br_dst.u.ip6, dst);
 	br_dst.proto = htons(ETH_P_IPV6);
 
-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }
 #endif
 
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
-	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
-{
-	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
-}
-
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
 	struct net_bridge_mdb_htable *mdb = br->mdb;
 	struct br_ip ip;
 
-	if (!mdb || br->multicast_disabled)
+	if (br->multicast_disabled)
 		return NULL;
 
 	if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
 
 	pskb_trim_rcsum(skb, len);
 
+	/* BUG: Should really parse the IP options here. */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 	u8 stx = CFSERL_STX;
 	int ret;
 	u16 expectlen = 0;
+
 	caif_assert(newpkt != NULL);
 	spin_lock(&layr->sync);
 
 	if (layr->incomplete_frm != NULL) {
-
 		layr->incomplete_frm =
 			cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
-		if (pkt == NULL)
+		if (pkt == NULL) {
+			spin_unlock(&layr->sync);
 			return -ENOMEM;
+		}
 	} else {
 		pkt = newpkt;
 	}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			   __func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624e..1f466e82ac33 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1488,6 +1488,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	skb_orphan(skb);
+	nf_reset(skb);
 
 	if (!(dev->flags & IFF_UP) ||
 	    (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1553,6 +1554,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }
 
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+
+	if (unlikely(txq > dev->num_tx_queues))
+		;
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		qdisc_reset_all_tx_gt(dev, txq);
+	}
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
+
 static inline void __netif_reschedule(struct Qdisc *q)
 {
@@ -1893,8 +1912,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-	if (!skb_tx(skb)->flags)
+	struct sock *sk = skb->sk;
+
+	if (sk && !skb_tx(skb)->flags) {
+		/* skb_tx_hash() wont be able to get sk.
+		 * We copy sk_hash into skb->rxhash
+		 */
+		if (!skb->rxhash)
+			skb->rxhash = sk->sk_hash;
 		skb_orphan(skb);
+	}
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1980,8 +2007,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol;
-
+		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2004,12 +2030,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	u16 queue_index;
+	int queue_index;
 	struct sock *sk = skb->sk;
 
-	if (sk_tx_queue_recorded(sk)) {
-		queue_index = sk_tx_queue_get(sk);
-	} else {
+	queue_index = sk_tx_queue_get(sk);
+	if (queue_index < 0) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue) {
@@ -2253,11 +2278,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				  "on queue %u, but number of RX queues is %u\n",
+				  dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2795,7 +2818,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	struct net_device *orig_dev;
 	struct net_device *master;
 	struct net_device *null_or_orig;
-	struct net_device *null_or_bond;
+	struct net_device *orig_or_bond;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2812,13 +2835,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
@@ -2868,10 +2902,10 @@ ncls:
 	 * device that may have registered for a specific ptype. The
 	 * handler may have to adjust skb->dev and orig_dev.
 	 */
-	null_or_bond = NULL;
+	orig_or_bond = orig_dev;
 	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
 	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
-		null_or_bond = vlan_dev_real_dev(skb->dev);
+		orig_or_bond = vlan_dev_real_dev(skb->dev);
 	}
 
 	type = skb->protocol;
@@ -2879,7 +2913,7 @@ ncls:
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type && (ptype->dev == null_or_orig ||
 		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
-		     ptype->dev == null_or_bond)) {
+		     ptype->dev == orig_or_bond)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
 }
 
 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
-	struct ethtool_rxnfc cmd;
+	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 
 	if (!dev->ethtool_ops->set_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members. User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_SRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
-	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+	return dev->ethtool_ops->set_rxnfc(dev, &info);
 }
 
 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
 	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 	int ret;
 	void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 	if (!ops->get_rxnfc)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&info, useraddr, sizeof(info)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members. User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_GRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;
 
 	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
 		if (info.rule_cnt > 0) {
-			rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
-					   GFP_USER);
+			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+				rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+						   GFP_USER);
 			if (!rule_buf)
 				return -ENOMEM;
 		}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 		goto err_out;
 
 	ret = -EFAULT;
-	if (copy_to_user(useraddr, &info, sizeof(info)))
+	if (copy_to_user(useraddr, &info, info_size))
 		goto err_out;
 
 	if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
-		rc = ethtool_get_rxnfc(dev, useraddr);
+		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_SRXFH:
 	case ETHTOOL_SRXCLSRLDEL:
 	case ETHTOOL_SRXCLSRLINS:
-		rc = ethtool_set_rxnfc(dev, useraddr);
+		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_GGRO:
 		rc = ethtool_get_gro(dev, useraddr);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
 * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6ba1c0eece03..a4e0a7482c2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
 	struct hh_cache *hh;
 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-		= neigh->dev->header_ops->cache_update;
+		= NULL;
+
+	if (neigh->dev->header_ops)
+		update = neigh->dev->header_ops->cache_update;
 
 	if (update) {
 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..ce88293a34e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  * reference count dropping and cleans up the skbuff as if it
  * just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ip_summed = old->ip_summed;
 	skb_copy_queue_mapping(new, old);
 	new->priority = old->priority;
+	new->deliver_no_wcard = old->deliver_no_wcard;
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	new->ipvs_property = old->ipvs_property;
 #endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(len);
 	C(data_len);
 	C(mac_len);
-	C(rxhash);
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	skb->network_header += off;
 	if (skb_mac_header_was_set(skb))
 		skb->mac_header += off;
-	skb->csum_start += nhead;
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += nhead;
 	skb->cloned = 0;
 	skb->hdr_len = 0;
 	skb->nohdr = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	copy_skb_header(n, skb);
 
 	off = newheadroom - oldheadroom;
-	n->csum_start += off;
+	if (n->ip_summed == CHECKSUM_PARTIAL)
+		n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header += off;
@@ -2965,6 +2968,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		   struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3027,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c51b55400dc5..11201784d29a 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
 	bool "Distributed Switch Architecture support"
 	default n
-	depends on EXPERIMENTAL && !S390
+	depends on EXPERIMENTAL && NET_ETHERNET && !S390
 	select PHYLIB
 	---help---
 	  This allows you to use hardware switch chips that use
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD
 	  If unsure, say N.
 
 config SYN_COOKIES
-	bool "IP: TCP syncookie support (disabled per default)"
+	bool "IP: TCP syncookie support"
 	---help---
 	  Normal TCP/IP networking is open to an attack known as "SYN
 	  flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES
 	  server is really overloaded. If this happens frequently better turn
 	  them off.
 
-	  If you say Y here, note that SYN cookies aren't enabled by default;
-	  you can enable them by saying Y to "/proc file system support" and
+	  If you say Y here, you can disable SYN cookies at run time by
+	  saying Y to "/proc file system support" and
 	  "Sysctl support" below and executing the command
 
-	  echo 1 >/proc/sys/net/ipv4/tcp_syncookies
+	  echo 0 > /proc/sys/net/ipv4/tcp_syncookies
 
-	  at boot time after the /proc file system has been mounted.
+	  after the /proc file system has been mounted.
 
 	  If unsure, say N.
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..041d41df1224 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
 			 !exthdrlen)
 		csummode = CHECKSUM_PARTIAL;
 
+	skb = skb_peek_tail(&sk->sk_write_queue);
+
 	inet->cork.length += length;
-	if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@ int ip_append_data(struct sock *sk,
 	 * adding appropriate IP header.
 	 */
 
-	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+	if (!skb)
 		goto alloc_new_skb;
 
 	while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 		return -EINVAL;
 
 	inet->cork.length += size;
-	if ((sk->sk_protocol == IPPROTO_UDP) &&
+	if ((size + skb->len > mtu) &&
+	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f9..7f6273506eea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
 	struct mr_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+		list_del(&mrt->list);
 		kfree(mrt);
+	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
 #else
@@ -440,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 	int err;
 
 	err = ipmr_fib_lookup(net, &fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
@@ -1726,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
 		goto dont_forward;
 
 	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}
 
 	if (!local) {
 		if (IPCB(skb)->opt.router_alert) {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
 	cpu = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr = &private->stackptr[cpu];
+	stackptr = per_cpu_ptr(private->stackptr, cpu);
 	origptr = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5c24db4a3c91..9f6b22206c52 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 			    { .sport = th->dest,
 			      .dport = th->source } } };
 	security_req_classify_flow(req, &fl);
-	if (ip_route_output_key(&init_net, &rt, &fl)) {
+	if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
 		reqsk_free(req);
 		goto out;
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..65afeaec15b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 	ssize_t spliced;
 	int ret;
 
+	sock_rps_record_flow(sk);
 	/*
 	 * We can't seek on a socket input
 	 */
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index c209e054a634..377bc9349371 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		 * calculate 2^fract in a <<7 value.
 		 */
 		is_slowstart = 1;
-		increment = ((1 << ca->rho) * hybla_fraction(rho_fractions))
-			- 128;
+		increment = ((1 << min(ca->rho, 16U)) *
+			hybla_fraction(rho_fractions)) - 128;
 	} else {
 		/*
 		 * congestion avoidance
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 	if (sk->sk_family == AF_INET) {
 		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &inet->daddr, ntohs(inet->dport),
+		       &inet->inet_daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		struct ipv6_pinfo *np = inet6_sk(sk);
 		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &np->daddr, ntohs(inet->dport),
+		       &np->daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 202cf09c4cd4..fe193e53af44 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 #endif
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		sock_rps_save_rxhash(sk, skb->rxhash);
 		TCP_CHECK_TIMER(sk);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
@@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		}
 		return 0;
 	}
-	}
+	} else
+		sock_rps_save_rxhash(sk, skb->rxhash);
+
 
 	TCP_CHECK_TIMER(sk);
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
@@ -1672,8 +1675,6 @@ process:
 
 	skb->dev = NULL;
 
-	sock_rps_save_rxhash(sk, skb->rxhash);
-
 	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..7ed9dc1042d1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	int mib_idx;
 	int fwd_rexmitting = 0;
 
+	if (!tp->packets_out)
+		return;
+
 	if (!tp->lost_out)
 		tp->retransmit_high = tp->snd_una;
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bdac..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
 
 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..784f34d11fdd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1760,7 +1760,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 
 	idev = ipv6_find_idev(dev);
 	if (!idev)
-		return NULL;
+		return ERR_PTR(-ENOBUFS);
+
+	if (idev->cnf.disable_ipv6)
+		return ERR_PTR(-EACCES);
 
 	/* Add default multicast route */
 	addrconf_add_mroute(dev);
@@ -2129,8 +2132,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	if (!dev)
 		return -ENODEV;
 
-	if ((idev = addrconf_add_dev(dev)) == NULL)
-		return -ENOBUFS;
+	idev = addrconf_add_dev(dev);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
 
 	scope = ipv6_addr_scope(pfx);
 
@@ -2377,7 +2381,7 @@ static void addrconf_dev_config(struct net_device *dev)
 	}
 
 	idev = addrconf_add_dev(dev);
-	if (idev == NULL)
+	if (IS_ERR(idev))
 		return;
 
 	memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2472,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
 	ASSERT_RTNL();
 
 	idev = addrconf_add_dev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
 		return;
 	}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
 			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 			      np->dontfrag);
 
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 073071f2b75b..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
 	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
 
 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
 	struct mr6_table *mrt, *next;
 
-	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+		list_del(&mrt->list);
 		ip6mr_free_table(mrt);
+	}
 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
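
Note: the ip6mr_rules_exit() fix above unlinks each table before freeing it. A sketch of why list_for_each_entry_safe() is the right iterator when entries are destroyed mid-walk (ex_* names are illustrative):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct ex_table {
		struct list_head list;
	};

	static void ex_free_all(struct list_head *head)
	{
		struct ex_table *t, *next;

		/* the _safe variant caches the successor up front,
		 * so t may be unlinked and freed inside the loop */
		list_for_each_entry_safe(t, next, head, list) {
			list_del(&t->list);
			kfree(t);
		}
	}
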
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 		     IPV6_TLV_PADN, 0 };
 
 	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);
 
 	if (!skb)
 		return NULL;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 2794b6002836..d6e9599d0705 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
 	int err = rt2->rt_hdr.nexthdr;
 
 	spin_lock(&x->lock);
-	if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+	if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
 	    !ipv6_addr_any((struct in6_addr *)x->coaddr))
 		err = -ENOENT;
 	spin_unlock(&x->lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..2efef52fb461 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 		src_addr = solicited_addr;
 		if (ifp->flags & IFA_F_OPTIMISTIC)
 			override = 0;
+		inc_opt |= ifp->idev->cnf.force_tllao;
 		in6_ifa_put(ifp);
 	} else {
 		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 	icmp6h.icmp6_solicited = solicited;
 	icmp6h.icmp6_override = override;
 
-	inc_opt |= ifp->idev->cnf.force_tllao;
 	__ndisc_send(dev, neigh, daddr, src_addr,
 		     &icmp6h, solicited_addr,
 		     inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 	fl.fl_ip_dport = otcph.source;
 	security_skb_classify_flow(oldskb, &fl);
 	dst = ip6_route_output(net, NULL, &fl);
-	if (dst == NULL)
+	if (dst == NULL || dst->error) {
+		dst_release(dst);
 		return;
-	if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+	}
+	if (xfrm_lookup(net, &dst, &fl, NULL, 0))
 		return;
 
 	hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
 	int flags = 0;
 
-	if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
 		flags |= RT6_LOOKUP_F_IFACE;
 
 	if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];
 
 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
 	ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
 
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f4..98258b7341e3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 					IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
 	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
 
-	/* send an addBA request */
+	/* prepare tid data */
 	sta->ampdu_mlme.dialog_token_allocator++;
 	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
 			sta->ampdu_mlme.dialog_token_allocator;
 	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
 				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
 				     sta->ampdu_mlme.tid_tx[tid]->ssn,
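
Note: the agg-tx.c hunk moves spin_unlock_bh() below the writes to tid_tx[], so the dialog token and SSN are filled in while sta->lock is still held and only the frame transmission happens unlocked. The shape of that pattern, reduced to a sketch (ex_send() is a hypothetical helper, not from this patch):

	#include <linux/spinlock.h>

	struct ex_agg {
		spinlock_t lock;
		u8 dialog_token;
	};

	static void ex_send(u8 token) { /* transmit, outside the lock */ }

	static void ex_start_session(struct ex_agg *agg)
	{
		u8 token;

		spin_lock_bh(&agg->lock);
		token = ++agg->dialog_token;	/* prepare state under the lock */
		spin_unlock_bh(&agg->lock);

		ex_send(token);			/* transmit after dropping it */
	}
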
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..67ee34f57df7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -632,7 +632,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
 	skb->dev = sta->sdata->dev;
 	skb->protocol = eth_type_trans(skb, sta->sdata->dev);
 	memset(skb->cb, 0, sizeof(skb->cb));
-	netif_rx(skb);
+	netif_rx_ni(skb);
 }
 
 static void sta_apply_parameters(struct ieee80211_local *local,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 			struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
 	/* trace_drv_get_survey(local, idx, survey, ret); */
 	return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2e..f803f8b72a93 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 		rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
 		break;
 	case IEEE80211_STYPE_ACTION:
-		if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+		switch (mgmt->u.action.category) {
+		case WLAN_CATEGORY_BACK: {
+			struct ieee80211_local *local = sdata->local;
+			int len = skb->len;
+			struct sta_info *sta;
+
+			rcu_read_lock();
+			sta = sta_info_get(sdata, mgmt->sa);
+			if (!sta) {
+				rcu_read_unlock();
+				break;
+			}
+
+			local_bh_disable();
+
+			switch (mgmt->u.action.u.addba_req.action_code) {
+			case WLAN_ACTION_ADDBA_REQ:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_req)))
+					break;
+				ieee80211_process_addba_request(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_ADDBA_RESP:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.addba_resp)))
+					break;
+				ieee80211_process_addba_resp(local, sta, mgmt, len);
+				break;
+			case WLAN_ACTION_DELBA:
+				if (len < (IEEE80211_MIN_ACTION_SIZE +
+					   sizeof(mgmt->u.action.u.delba)))
+					break;
+				ieee80211_process_delba(sdata, sta, mgmt, len);
+				break;
+			}
+			local_bh_enable();
+			rcu_read_unlock();
 			break;
-
-		ieee80211_sta_process_chanswitch(sdata,
-			&mgmt->u.action.u.chan_switch.sw_elem,
-			(void *)ifmgd->associated->priv,
-			rx_status->mactime);
-		break;
+			}
+		case WLAN_CATEGORY_SPECTRUM_MGMT:
+			ieee80211_sta_process_chanswitch(sdata,
+				&mgmt->u.action.u.chan_switch.sw_elem,
+				(void *)ifmgd->associated->priv,
+				rx_status->mactime);
+			break;
+		}
 	}
 	mutex_unlock(&ifmgd->mtx);
 
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 	mutex_unlock(&ifmgd->mtx);
 
 	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+		struct ieee80211_local *local = sdata->local;
+		struct ieee80211_work *wk;
+
+		mutex_lock(&local->work_mtx);
+		list_for_each_entry(wk, &local->work_list, list) {
+			if (wk->sdata != sdata)
+				continue;
+
+			if (wk->type != IEEE80211_WORK_ASSOC)
+				continue;
+
+			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+				continue;
+			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+				continue;
 
+			/*
+			 * Printing the message only here means we can't
+			 * spuriously print it, but it also means that it
+			 * won't be printed when the frame comes in before
+			 * we even tried to associate or in similar cases.
+			 *
+			 * Ultimately, I suspect cfg80211 should print the
+			 * messages instead.
+			 */
+			printk(KERN_DEBUG
+			       "%s: deauthenticated from %pM (Reason: %u)\n",
+			       sdata->name, mgmt->bssid,
+			       le16_to_cpu(mgmt->u.deauth.reason_code));
+
+			list_del_rcu(&wk->list);
+			free_work(wk);
+			break;
+		}
+		mutex_unlock(&local->work_mtx);
+
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	}
  out:
 	kfree_skb(skb);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb8..be9abc2e6348 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 		return RX_CONTINUE;
 
 	if (ieee80211_is_back_req(bar->frame_control)) {
+		struct {
+			__le16 control, start_seq_num;
+		} __packed bar_data;
+
 		if (!rx->sta)
 			return RX_DROP_MONITOR;
+
+		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+				  &bar_data, sizeof(bar_data)))
+			return RX_DROP_MONITOR;
+
 		spin_lock(&rx->sta->lock);
-		tid = le16_to_cpu(bar->control) >> 12;
+		tid = le16_to_cpu(bar_data.control) >> 12;
 		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
 			spin_unlock(&rx->sta->lock);
 			return RX_DROP_MONITOR;
 		}
 		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
-		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
 		/* reset session timer */
 		if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 			break;
 
+		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
 		switch (mgmt->u.action.u.addba_req.action_code) {
 		case WLAN_ACTION_ADDBA_REQ:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
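
Note: the BAR handler above stops dereferencing the frame header in place, because the interesting bytes of a received skb may live in a paged (non-linear) area; skb_copy_bits() copies them into a stack buffer first. A sketch of that defensive read (ex_* names are illustrative):

	#include <linux/skbuff.h>

	static int ex_read_seq(struct sk_buff *skb, unsigned int offset)
	{
		__le16 seq;

		/* returns 0 on success even if the range spans paged data */
		if (skb_copy_bits(skb, offset, &seq, sizeof(seq)))
			return -EINVAL;
		return le16_to_cpu(seq);
	}
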
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index be3d4a698692..b025dc7bb0fd 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_mgmt *mgmt;
 	struct ieee80211_work *wk;
-	enum work_action rma;
+	enum work_action rma = WORK_ACT_NONE;
 	u16 fc;
 
 	rx_status = (struct ieee80211_rx_status *) skb->cb;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d8f7e8ef67b4..ff04e9edbed6 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -162,6 +162,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
 	ct_write_lock(hash);
+	spin_lock(&cp->lock);
 
 	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
 		list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -174,6 +175,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 		ret = 0;
 	}
 
+	spin_unlock(&cp->lock);
 	ct_write_unlock(hash);
 
 	return ret;
@@ -193,6 +195,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 	hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
 
 	ct_write_lock(hash);
+	spin_lock(&cp->lock);
 
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
 		list_del(&cp->c_list);
@@ -202,6 +205,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 	} else
 		ret = 0;
 
+	spin_unlock(&cp->lock);
 	ct_write_unlock(hash);
 
 	return ret;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
 		vfree(info->jumpstack);
 	else
 		kfree(info->jumpstack);
-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+
+	free_percpu(info->stackptr);
 
 	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
 	unsigned int size;
 	int cpu;
 
-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
 	if (i->stackptr == NULL)
 		return -ENOMEM;
-	memset(i->stackptr, 0, size);
 
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;
 
-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {
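
Note: the x_tables hunks drop the hand-rolled kmalloc/vmalloc-by-size array of per-CPU stack pointers in favour of the dedicated percpu allocator, and stop allocating the jumpstack again in xt_register_table(). A standalone sketch of the alloc_percpu()/per_cpu_ptr()/free_percpu() triple (not the xt code itself):

	#include <linux/percpu.h>
	#include <linux/errno.h>

	static unsigned int __percpu *ex_stackptr;

	static int ex_init(void)
	{
		ex_stackptr = alloc_percpu(unsigned int);	/* zeroed on every CPU */
		if (ex_stackptr == NULL)
			return -ENOMEM;
		return 0;
	}

	static void ex_bump(int cpu)
	{
		unsigned int *p = per_cpu_ptr(ex_stackptr, cpu);

		(*p)++;		/* touch this CPU's private slot only */
	}

	static void ex_exit(void)
	{
		free_percpu(ex_stackptr);
	}
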
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..b2a3ae6cad78 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
 		newsk = NULL;
 		goto out;
 	}
+	kfree_skb(oskb);
 
 	sock_hold(sk);
 	pep_sk(newsk)->listener = sk;
@@ -1045,12 +1046,12 @@ static void pep_sock_unhash(struct sock *sk)
 	lock_sock(sk);
 	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
 		skparent = pn->listener;
-		sk_del_node_init(sk);
 		release_sock(sk);
 
-		sk = skparent;
 		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
 	}
 	/* Unhash a listening sock only when it is closed
 	 * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_iw_setup_qp(conn);
 	if (err) {
 		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
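
Note: both RDS hunks fix the same leak: when rds_ib_setup_qp()/rds_iw_setup_qp() fails, the code jumped to out with c_cm_lock still held. A sketch of releasing the lock on every exit path (ex_* names are illustrative):

	#include <linux/mutex.h>
	#include <linux/errno.h>

	static DEFINE_MUTEX(ex_cm_lock);

	static int ex_handle_connect(int setup_failed)
	{
		int ret = 0;

		mutex_lock(&ex_cm_lock);
		if (setup_failed) {
			ret = -ECONNREFUSED;
			mutex_unlock(&ex_cm_lock);	/* drop before the error exit */
			goto out;
		}
		mutex_unlock(&ex_cm_lock);
	out:
		return ret;	/* the label runs lockless cleanup in the real code */
	}
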
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863e3b87..1980b71c283f 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -33,6 +33,7 @@
 static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
 static u32 mirred_idx_gen;
 static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
 
 static struct tcf_hashinfo mirred_hash_info = {
 	.htab	=	tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
 		m->tcf_bindcnt--;
 		m->tcf_refcnt--;
 		if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-			dev_put(m->tcfm_dev);
+			list_del(&m->tcfm_list);
+			if (m->tcfm_dev)
+				dev_put(m->tcfm_dev);
 			tcf_hash_destroy(&m->common, &mirred_hash_info);
 			return 1;
 		}
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
 		m->tcfm_ok_push = ok_push;
 	}
 	spin_unlock_bh(&m->tcf_lock);
-	if (ret == ACT_P_CREATED)
+	if (ret == ACT_P_CREATED) {
+		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(pc, &mirred_hash_info);
+	}
 
 	return ret;
 }
@@ -162,9 +167,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 	m->tcf_tm.lastuse = jiffies;
 
 	dev = m->tcfm_dev;
+	if (!dev) {
+		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+		goto out;
+	}
+
 	if (!(dev->flags & IFF_UP)) {
 		if (net_ratelimit())
-			pr_notice("tc mirred to Houston: device %s is gone!\n",
+			pr_notice("tc mirred to Houston: device %s is down\n",
 				  dev->name);
 		goto out;
 	}
@@ -232,6 +242,28 @@ nla_put_failure:
 	return -1;
 }
 
+static int mirred_device_event(struct notifier_block *unused,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct tcf_mirred *m;
+
+	if (event == NETDEV_UNREGISTER)
+		list_for_each_entry(m, &mirred_list, tcfm_list) {
+			if (m->tcfm_dev == dev) {
+				dev_put(dev);
+				m->tcfm_dev = NULL;
+			}
+		}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+	.notifier_call = mirred_device_event,
+};
+
+
 static struct tc_action_ops act_mirred_ops = {
 	.kind		=	"mirred",
 	.hinfo		=	&mirred_hash_info,
@@ -252,12 +284,17 @@ MODULE_LICENSE("GPL");
 
 static int __init mirred_init_module(void)
 {
+	int err = register_netdevice_notifier(&mirred_device_notifier);
+	if (err)
+		return err;
+
 	pr_info("Mirror/redirect action on\n");
 	return tcf_register_action(&act_mirred_ops);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
+	unregister_netdevice_notifier(&mirred_device_notifier);
 	tcf_unregister_action(&act_mirred_ops);
 }
 
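
Note: act_mirred now tracks its actions on mirred_list so a NETDEV_UNREGISTER event can drop the device reference instead of pinning an unregistered device forever; tcf_mirred() then treats a NULL tcfm_dev as "target gone". A minimal notifier registration sketch (ex_* names are illustrative; in this kernel the notifier's ptr argument is the net_device itself):

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int ex_device_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		if (event == NETDEV_UNREGISTER)
			printk(KERN_DEBUG "ex: %s is going away\n", dev->name);
		return NOTIFY_DONE;
	}

	static struct notifier_block ex_notifier = {
		.notifier_call = ex_device_event,
	};

	static int __init ex_init(void)
	{
		return register_netdevice_notifier(&ex_notifier);
	}
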
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..724553e8ed7b 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 			iph->daddr = new_addr;
 
 		csum_replace4(&iph->check, addr, new_addr);
+	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
+		   iph->protocol != IPPROTO_ICMP) {
+		goto out;
 	}
 
 	ihl = iph->ihl * 4;
@@ -202,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	{
 		struct icmphdr *icmph;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);
@@ -212,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		    (icmph->type != ICMP_PARAMETERPROB))
 			break;
 
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+			goto drop;
+
 		iph = (void *)(icmph + 1);
 		if (egress)
 			addr = iph->daddr;
@@ -247,6 +253,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		break;
 	}
 
+out:
 	return action;
 
 drop:
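
Note: the act_nat hunks pull only what is about to be read: the ICMP header alone at first, and the embedded IP header only once the ICMP type says one follows. A sketch of the pskb_may_pull() contract (ex_* names are illustrative):

	#include <linux/skbuff.h>
	#include <linux/ip.h>
	#include <linux/errno.h>

	static int ex_peek_proto(struct sk_buff *skb)
	{
		struct iphdr *iph;

		/* ensure the bytes are linear before dereferencing them */
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return -EINVAL;
		/* re-fetch the pointer: a successful pull may relocate data */
		iph = ip_hdr(skb);
		return iph->protocol;
	}
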
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd840..50e3d945e1f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_pedit *p = a->priv;
 	int i, munged = 0;
-	u8 *pptr;
+	unsigned int off;
 
 	if (!(skb->tc_verd & TC_OK2MUNGE)) {
 		/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		}
 	}
 
-	pptr = skb_network_header(skb);
+	off = skb_network_offset(skb);
 
 	spin_lock(&p->tcf_lock);
 
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		struct tc_pedit_key *tkey = p->tcfp_keys;
 
 		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-			u32 *ptr;
+			u32 *ptr, _data;
 			int offset = tkey->off;
 
 			if (tkey->offmask) {
-				if (skb->len > tkey->at) {
-					char *j = pptr + tkey->at;
-					offset += ((*j & tkey->offmask) >>
-						   tkey->shift);
-				} else {
+				char *d, _d;
+
+				d = skb_header_pointer(skb, off + tkey->at, 1,
+						       &_d);
+				if (!d)
 					goto bad;
-				}
+				offset += (*d & tkey->offmask) >> tkey->shift;
 			}
 
 			if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 				goto bad;
 			}
 
-			ptr = (u32 *)(pptr+offset);
+			ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+			if (!ptr)
+				goto bad;
 			/* just do it, baby */
 			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+			if (ptr == &_data)
+				skb_store_bits(skb, off + offset, ptr, 4);
 			munged++;
 		}
 
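
Note: act_pedit now addresses packet bytes by offset, reading through skb_header_pointer() and writing back with skb_store_bits() when the word came from a paged fragment rather than the linear head. A reduced read-modify-write sketch (ex_* names are illustrative):

	#include <linux/skbuff.h>
	#include <linux/errno.h>

	static int ex_xor_word(struct sk_buff *skb, unsigned int off,
			       u32 mask, u32 val)
	{
		u32 *ptr, _data;

		ptr = skb_header_pointer(skb, off, 4, &_data);
		if (!ptr)
			return -EINVAL;		/* offset beyond the packet */
		*ptr = (*ptr & mask) ^ val;
		if (ptr == &_data)		/* bytes were copied, not referenced */
			skb_store_bits(skb, off, ptr, 4);
		return 0;
	}
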
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c619..4f522143811e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 {
 	struct {
 		struct tc_u_knode *knode;
-		u8		  *ptr;
+		unsigned int	  off;
 	} stack[TC_U32_MAXDEPTH];
 
 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-	u8 *ptr = skb_network_header(skb);
+	unsigned int off = skb_network_offset(skb);
 	struct tc_u_knode *n;
 	int sdepth = 0;
 	int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
 #endif
 
 		for (i = n->sel.nkeys; i>0; i--, key++) {
-
-			if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+			unsigned int toff;
+			__be32 *data, _data;
+
+			toff = off + key->off + (off2 & key->offmask);
+			data = skb_header_pointer(skb, toff, 4, &_data);
+			if (!data)
+				goto out;
+			if ((*data ^ key->val) & key->mask) {
 				n = n->next;
 				goto next_knode;
 			}
@@ -174,29 +180,45 @@ check_terminal:
 		if (sdepth >= TC_U32_MAXDEPTH)
 			goto deadloop;
 		stack[sdepth].knode = n;
-		stack[sdepth].ptr = ptr;
+		stack[sdepth].off = off;
 		sdepth++;
 
 		ht = n->ht_down;
 		sel = 0;
-		if (ht->divisor)
-			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
+		if (ht->divisor) {
+			__be32 *data, _data;
 
+			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+						  &_data);
+			if (!data)
+				goto out;
+			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+							  n->fshift);
+		}
 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
 			goto next_ht;
 
 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
-			if (n->sel.flags&TC_U32_VAROFFSET)
-				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+			if (n->sel.flags & TC_U32_VAROFFSET) {
+				__be16 *data, _data;
+
+				data = skb_header_pointer(skb,
+							  off + n->sel.offoff,
+							  2, &_data);
+				if (!data)
+					goto out;
+				off2 += ntohs(n->sel.offmask & *data) >>
+					n->sel.offshift;
+			}
 			off2 &= ~3;
 		}
 		if (n->sel.flags&TC_U32_EAT) {
-			ptr += off2;
+			off += off2;
 			off2 = 0;
 		}
 
-		if (ptr < skb_tail_pointer(skb))
+		if (off < skb->len)
 			goto next_ht;
 	}
 
@@ -204,9 +226,10 @@ check_terminal:
 	if (sdepth--) {
 		n = stack[sdepth].knode;
 		ht = n->ht_up;
-		ptr = stack[sdepth].ptr;
+		off = stack[sdepth].off;
 		goto check_terminal;
 	}
+out:
 	return -1;
 
 deadloop:
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3415b6ce1c0a..807643bdcbac 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev)
 	dev->tx_queue_len	= 100;
 	dev->flags		= IFF_NOARP;
 	dev->hard_header_len	= LL_MAX_HEADER;
+	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
 }
 
 static LIST_HEAD(master_dev_list);
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 73affb8624fa..8dc47f1d0001 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -267,7 +267,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
  * Run memory cache shrinker.
  */
 static int
-rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free);
 	int res;
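
Note: the auth.c hunk follows an mm API change: shrinker callbacks now receive the struct shrinker they were registered with, letting one callback serve several registrations. A sketch against the new signature (ex_* names are illustrative):

	#include <linux/mm.h>

	static int ex_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan == 0)
			return 0;	/* only report the (empty) cache size */
		return 0;		/* nothing cached in this sketch */
	}

	static struct shrinker ex_shrinker = {
		.shrink	= ex_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	static void ex_register(void)
	{
		register_shrinker(&ex_shrinker);
	}
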
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2a9675136c68..7ca65c7005ea 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -210,7 +210,8 @@ struct sock_xprt {
 	 * State of TCP reply receive
 	 */
 	__be32			tcp_fraghdr,
-				tcp_xid;
+				tcp_xid,
+				tcp_calldir;
 
 	u32			tcp_offset,
 				tcp_reclen;
@@ -927,7 +928,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
 {
 	size_t len, used;
 	u32 offset;
-	__be32	calldir;
+	char *p;
 
 	/*
 	 * We want transport->tcp_offset to be 8 at the end of this routine
@@ -936,26 +937,33 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
 	 * transport->tcp_offset is 4 (after having already read the xid).
 	 */
 	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
-	len = sizeof(calldir) - offset;
+	len = sizeof(transport->tcp_calldir) - offset;
 	dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
-	used = xdr_skb_read_bits(desc, &calldir, len);
+	p = ((char *) &transport->tcp_calldir) + offset;
+	used = xdr_skb_read_bits(desc, p, len);
 	transport->tcp_offset += used;
 	if (used != len)
 		return;
 	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
-	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
-	transport->tcp_flags |= TCP_RCV_COPY_DATA;
 	/*
 	 * We don't yet have the XDR buffer, so we will write the calldir
 	 * out after we get the buffer from the 'struct rpc_rqst'
 	 */
-	if (ntohl(calldir) == RPC_REPLY)
+	switch (ntohl(transport->tcp_calldir)) {
+	case RPC_REPLY:
+		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+		transport->tcp_flags |= TCP_RCV_COPY_DATA;
 		transport->tcp_flags |= TCP_RPC_REPLY;
-	else
+		break;
+	case RPC_CALL:
+		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+		transport->tcp_flags |= TCP_RCV_COPY_DATA;
 		transport->tcp_flags &= ~TCP_RPC_REPLY;
-	dprintk("RPC:       reading %s CALL/REPLY flag %08x\n",
-		(transport->tcp_flags & TCP_RPC_REPLY) ?
-		"reply for" : "request with", calldir);
+		break;
+	default:
+		dprintk("RPC:       invalid request message type\n");
+		xprt_force_disconnect(&transport->xprt);
+	}
 	xs_tcp_check_fraghdr(transport);
 }
961 969
@@ -975,12 +983,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
 		/*
 		 * Save the RPC direction in the XDR buffer
 		 */
-		__be32	calldir = transport->tcp_flags & TCP_RPC_REPLY ?
-					htonl(RPC_REPLY) : 0;
-
 		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
-			&calldir, sizeof(calldir));
-		transport->tcp_copied += sizeof(calldir);
+			&transport->tcp_calldir,
+			sizeof(transport->tcp_calldir));
+		transport->tcp_copied += sizeof(transport->tcp_calldir);
 		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
 	}
 
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdfa..a3cca0a94346 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
 			goto error_nolock;
 		}
 
-		dst = dst_pop(dst);
+		dst = skb_dst_pop(skb);
 		if (!dst) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		skb_dst_set(skb, dst);
+		skb_dst_set_noref(skb, dst);
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d3..a7ec5a8a2380 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	/* Try to instantiate a bundle */
 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-	if (err < 0) {
-		if (err != -EAGAIN)
+	if (err <= 0) {
+		if (err != 0 && err != -EAGAIN)
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
 		return ERR_PTR(err);
 	}
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
 			goto make_dummy_bundle;
 		dst_hold(&xdst->u.dst);
 		return oldflo;
+	} else if (new_xdst == NULL) {
+		num_xfrms = 0;
+		if (oldflo == NULL)
+			goto make_dummy_bundle;
+		xdst->num_xfrms = 0;
+		dst_hold(&xdst->u.dst);
+		return oldflo;
 	}
 
 	/* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
 		xfrm_pols_put(pols, num_pols);
 		err = PTR_ERR(xdst);
 		goto dropdst;
+	} else if (xdst == NULL) {
+		num_xfrms = 0;
+		drop_pols = num_pols;
+		goto no_transform;
 	}
 
 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
@@ -2153,6 +2164,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 		return 0;
 	}
 
+	skb_dst_force(skb);
 	dst = skb_dst(skb);
 
 	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
@@ -2299,7 +2311,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
 		return 0;
 	if (xdst->xfrm_genid != dst->xfrm->genid)
 		return 0;
-	if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
+	if (xdst->num_pols > 0 &&
+	    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
 		return 0;
 
 	if (strict && fl &&