Diffstat (limited to 'net')
45 files changed, 373 insertions, 144 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;

 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;

 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;

 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;

 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc4..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
 				break;
 			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
 			memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
+
+			i++;
 		}
 		r->len = htons(skb->len - len);
 	}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b10e3cdb08f8..800b6b9fbbae 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 		acl->sec_level = sec_level;
 		acl->auth_type = auth_type;
 		hci_acl_connect(acl);
+	} else {
+		if (acl->sec_level < sec_level)
+			acl->sec_level = sec_level;
+		if (acl->auth_type < auth_type)
+			acl->auth_type = auth_type;
 	}

 	if (type == ACL_LINK)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6c57fc71c7e2..786b5de0bac4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	if (conn) {
 		if (!ev->status)
 			conn->link_mode |= HCI_LM_AUTH;
+		else
+			conn->sec_level = BT_SECURITY_LOW;

 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1b682a5aa061..cf3c4073a8a6 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
 	l2cap_send_sframe(pi, control);
 }

+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 			return;

-		if (l2cap_check_security(sk)) {
+		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
 			struct l2cap_conn_req req;
 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 			req.psm = l2cap_pi(sk)->psm;

 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 		}

 		if (sk->sk_state == BT_CONNECT) {
-			if (l2cap_check_security(sk)) {
+			if (l2cap_check_security(sk) &&
+					__l2cap_no_conn_pending(sk)) {
 				struct l2cap_conn_req req;
 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
 				req.psm = l2cap_pi(sk)->psm;

 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 		l2cap_pi(sk)->ident = 0;
 		l2cap_pi(sk)->dcid = dcid;
 		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
 		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 				req.psm = l2cap_pi(sk)->psm;

 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
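The three l2cap.c hunks above implement a single guard: a socket records an
outstanding L2CAP_CONN_REQ in its conf_state (L2CAP_CONF_CONNECT_PEND), and
every sender first checks the new __l2cap_no_conn_pending() helper, so a
duplicate connect request can no longer go out for the same channel. The
condensed shape of each call site (a sketch assembled from the hunks, not a
complete function):

	if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
		struct l2cap_conn_req req;

		req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
		req.psm = l2cap_pi(sk)->psm;

		l2cap_pi(sk)->ident = l2cap_get_ident(conn);
		/* mark the request as outstanding ... */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
			       L2CAP_CONN_REQ, sizeof(req), &req);
	}
	/* ... and l2cap_connect_rsp() clears it when the response arrives */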
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..753fc4221f3c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
 	return count != 0 && ret;
 }

-static void br_poll_controller(struct net_device *br_dev)
-{
-	struct netpoll *np = br_dev->npinfo->netpoll;
-
-	if (np->real_dev != br_dev)
-		netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
-	.ndo_poll_controller	 = br_poll_controller,
 #endif
 };

diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965b..b01dde35a69e 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
 {
 	struct net_bridge *br = (struct net_bridge *)_data;
 	unsigned long delay = hold_time(br);
-	unsigned long next_timer = jiffies + br->forward_delay;
+	unsigned long next_timer = jiffies + br->ageing_time;
 	int i;

 	spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
 	}
 	spin_unlock_bh(&br->hash_lock);

-	/* Add HZ/4 to ensure we round the jiffies upwards to be after the next
-	 * timer, otherwise we might round down and will have no-op run. */
-	mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
+	mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
 }

 /* Completely flush all dynamic entries in forwarding database.*/
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef1393097..595da45f9088 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 		kfree_skb(skb);
 	else {
 		skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-		if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-			netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-			skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-		} else
-#endif
-			dev_queue_xmit(skb);
+		dev_queue_xmit(skb);
 	}
 }

@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)

 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	struct net_bridge *br = to->br;
-	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-		struct netpoll *np;
-		to->dev->npinfo = skb->dev->npinfo;
-		np = skb->dev->npinfo->netpoll;
-		np->real_dev = np->dev = to->dev;
-		to->dev->priv_flags |= IFF_IN_NETPOLL;
-	}
-#endif
 	skb->dev = to->dev;
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 			br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	if (skb->dev->npinfo)
-		skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }

 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -140,10 +119,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
 			 void (*__packet_hook)(const struct net_bridge_port *p,
 					       struct sk_buff *skb))
 {
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (!skb) {
-		struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
-
 		dev->stats.tx_dropped++;
 		return -ENOMEM;
 	}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
 	return NULL;
 }

+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
+	if (!mdb)
+		return NULL;
+
+	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
 static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	struct net_bridge_mdb_htable *mdb, __be32 dst)
 {
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	br_dst.u.ip4 = dst;
 	br_dst.proto = htons(ETH_P_IP);

-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }

 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 	ipv6_addr_copy(&br_dst.u.ip6, dst);
 	br_dst.proto = htons(ETH_P_IPV6);

-	return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+	return br_mdb_ip_get(mdb, &br_dst);
 }
 #endif

-static struct net_bridge_mdb_entry *br_mdb_ip_get(
-	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
-{
-	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
-}
-
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
 	struct net_bridge_mdb_htable *mdb = br->mdb;
 	struct br_ip ip;

-	if (!mdb || br->multicast_disabled)
+	if (br->multicast_disabled)
 		return NULL;

 	if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,

 	pskb_trim_rcsum(skb, len);

+	/* BUG: Should really parse the IP options here. */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;

-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
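Both CAIF hunks fix the same C operator-precedence bug: `!` binds tighter
than `>`, so `!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE` was parsed as
`(!len) > max`, which compares 0 or 1 against the limit and is false for any
real packet, so the overflow check never fired. A minimal stand-alone
demonstration (plain user-space C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int len = 9000, max = 4096;	/* an oversized packet */

		printf("%d\n", !len > max);	/* (!9000) > 4096 -> 0 > 4096 -> 0 */
		printf("%d\n", len > max);	/* corrected test -> 1 */
		return 0;
	}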
diff --git a/net/core/dev.c b/net/core/dev.c
index d03470f5260a..1f466e82ac33 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1488,6 +1488,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	skb_orphan(skb);
+	nf_reset(skb);

 	if (!(dev->flags & IFF_UP) ||
 	    (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1553,6 +1554,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	rcu_read_unlock();
 }

+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+	unsigned int real_num = dev->real_num_tx_queues;
+
+	if (unlikely(txq > dev->num_tx_queues))
+		;
+	else if (txq > real_num)
+		dev->real_num_tx_queues = txq;
+	else if (txq < real_num) {
+		dev->real_num_tx_queues = txq;
+		qdisc_reset_all_tx_gt(dev, txq);
+	}
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);

 static inline void __netif_reschedule(struct Qdisc *q)
 {
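The new helper above is the sanctioned way for a driver to change its count
of active TX queues at runtime; the subtle case is shrinking, where skbs
already queued on the disappearing queues would otherwise go stale, hence the
qdisc_reset_all_tx_gt() flush. A hedged sketch of a caller -- the driver
function and its validation policy are hypothetical, only the helper itself
comes from the hunk:

	/* activate 'count' of the TX queues allocated at alloc_etherdev_mq() time */
	static int mydrv_set_tx_channels(struct net_device *dev, unsigned int count)
	{
		if (count == 0 || count > dev->num_tx_queues)
			return -ERANGE;

		/* growing is a simple store; shrinking also flushes stale qdisc skbs */
		netif_set_real_num_tx_queues(dev, count);
		return 0;
	}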
@@ -1893,8 +1912,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-	if (!skb_tx(skb)->flags)
+	struct sock *sk = skb->sk;
+
+	if (sk && !skb_tx(skb)->flags) {
+		/* skb_tx_hash() won't be able to get sk.
+		 * We copy sk_hash into skb->rxhash
+		 */
+		if (!skb->rxhash)
+			skb->rxhash = sk->sk_hash;
 		skb_orphan(skb);
+	}
 }

 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1980,8 +2007,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol;
-
+		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);

 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2004,12 +2030,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	u16 queue_index;
+	int queue_index;
 	struct sock *sk = skb->sk;

-	if (sk_tx_queue_recorded(sk)) {
-		queue_index = sk_tx_queue_get(sk);
-	} else {
+	queue_index = sk_tx_queue_get(sk);
+	if (queue_index < 0) {
 		const struct net_device_ops *ops = dev->netdev_ops;

 		if (ops->ndo_select_queue) {
@@ -2253,11 +2278,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				  "on queue %u, but number of RX queues is %u\n",
+				  dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2812,13 +2835,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;

+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}

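Together with the vlan_core.c hunks at the top, this changes bonding's
duplicate suppression from an outright drop in the VLAN acceleration path to
a tag: skbs from inactive slaves get skb->deliver_no_wcard set and are then
delivered only to exact-match protocol handlers. As the new comment says, an
exact-match handler that is sensitive to duplicates must still discard them
itself; a hypothetical handler might do so like this (a sketch, not an
in-tree handler):

	static int mydrv_rcv(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
	{
		/* copy received via an inactive bond slave; the active slave
		 * will deliver its own copy, so drop this one */
		if (skb->deliver_no_wcard) {
			kfree_skb(skb);
			return NET_RX_DROP;
		}
		/* ... normal receive processing ... */
		return NET_RX_SUCCESS;
	}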
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
 }

 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
-	struct ethtool_rxnfc cmd;
+	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);

 	if (!dev->ethtool_ops->set_rxnfc)
 		return -EOPNOTSUPP;

-	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_SRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;

-	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
+	return dev->ethtool_ops->set_rxnfc(dev, &info);
 }

 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
-						void __user *useraddr)
+						u32 cmd, void __user *useraddr)
 {
 	struct ethtool_rxnfc info;
+	size_t info_size = sizeof(info);
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 	int ret;
 	void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 	if (!ops->get_rxnfc)
 		return -EOPNOTSUPP;

-	if (copy_from_user(&info, useraddr, sizeof(info)))
+	/* struct ethtool_rxnfc was originally defined for
+	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
+	 * members.  User-space might still be using that
+	 * definition. */
+	if (cmd == ETHTOOL_GRXFH)
+		info_size = (offsetof(struct ethtool_rxnfc, data) +
+			     sizeof(info.data));
+
+	if (copy_from_user(&info, useraddr, info_size))
 		return -EFAULT;

 	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
 		if (info.rule_cnt > 0) {
-			rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
-					   GFP_USER);
+			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
+				rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+						   GFP_USER);
 			if (!rule_buf)
 				return -ENOMEM;
 		}
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
 		goto err_out;

 	ret = -EFAULT;
-	if (copy_to_user(useraddr, &info, sizeof(info)))
+	if (copy_to_user(useraddr, &info, info_size))
 		goto err_out;

 	if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
-		rc = ethtool_get_rxnfc(dev, useraddr);
+		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_SRXFH:
 	case ETHTOOL_SRXCLSRLDEL:
 	case ETHTOOL_SRXCLSRLINS:
-		rc = ethtool_set_rxnfc(dev, useraddr);
+		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
 		break;
 	case ETHTOOL_GGRO:
 		rc = ethtool_get_gro(dev, useraddr);
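The compatibility rule both rxnfc hunks encode: for the original
ETHTOOL_GRXFH/ETHTOOL_SRXFH commands, user-space may have been compiled
against a struct ethtool_rxnfc that ended at the data member, so the kernel
must copy exactly that prefix in and out rather than the full
sizeof(struct ethtool_rxnfc). The size computation in isolation (a sketch
merging the get/set cases; compiles as user-space C against the kernel
ethtool header):

	#include <stddef.h>
	#include <linux/ethtool.h>

	static size_t rxnfc_copy_size(unsigned int ethcmd)
	{
		/* legacy layout: only cmd, flow_type and data are guaranteed */
		if (ethcmd == ETHTOOL_GRXFH || ethcmd == ETHTOOL_SRXFH)
			return offsetof(struct ethtool_rxnfc, data) +
			       sizeof(((struct ethtool_rxnfc *)0)->data);
		return sizeof(struct ethtool_rxnfc);	/* full current layout */
	}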
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);

 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);

 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;

+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,

 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);

 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;

+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);

@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);

@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();

-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6ba1c0eece03..a4e0a7482c2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
 	struct hh_cache *hh;
 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-		= neigh->dev->header_ops->cache_update;
+		= NULL;
+
+	if (neigh->dev->header_ops)
+		update = neigh->dev->header_ops->cache_update;

 	if (update) {
 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();

 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }

 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
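The pktgen fix anchors the next transmit deadline to the intended wake-up
time (spin_until) instead of the measured end_time, so scheduler oversleep no
longer accumulates into the inter-packet gap. A toy model of the difference
(plain C, assuming the thread always wakes one tick late):

	#include <stdio.h>

	int main(void)
	{
		const long delay = 10, oversleep = 1;
		long old_next = 0, new_next = 0;
		int i;

		for (i = 1; i <= 4; i++) {
			old_next = (old_next + oversleep) + delay; /* end_time + delay */
			new_next = new_next + delay;               /* spin_until + delay */
			printf("tx %d: old=%ld new=%ld\n", i, old_next, new_next);
		}
		return 0;	/* old drifts +1 per packet; new stays on the 10-tick grid */
	}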
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9f07e749d7b1..ce88293a34e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ip_summed = old->ip_summed;
 	skb_copy_queue_mapping(new, old);
 	new->priority = old->priority;
+	new->deliver_no_wcard = old->deliver_no_wcard;
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	new->ipvs_property = old->ipvs_property;
 #endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(len);
 	C(data_len);
 	C(mac_len);
-	C(rxhash);
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	skb->network_header += off;
 	if (skb_mac_header_was_set(skb))
 		skb->mac_header += off;
-	skb->csum_start += nhead;
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += nhead;
 	skb->cloned = 0;
 	skb->hdr_len = 0;
 	skb->nohdr = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	copy_skb_header(n, skb);

 	off = newheadroom - oldheadroom;
-	n->csum_start += off;
+	if (n->ip_summed == CHECKSUM_PARTIAL)
+		n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header += off;
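Both csum_start hunks apply the same rule: csum_start only names an offset
into the buffer when ip_summed == CHECKSUM_PARTIAL; otherwise the same
sk_buff storage holds a computed checksum (skb->csum), and shifting it by a
headroom change would corrupt it. The guard, in sketch form:

	/* after the skb's headroom changed by 'off' bytes: */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;	/* an offset: moves with the data */
	/* else skb->csum holds a checksum value and must be left untouched */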
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c51b55400dc5..11201784d29a 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
 	bool "Distributed Switch Architecture support"
 	default n
-	depends on EXPERIMENTAL && !S390
+	depends on EXPERIMENTAL && NET_ETHERNET && !S390
 	select PHYLIB
 	---help---
 	  This allows you to use hardware switch chips that use
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..041d41df1224 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -873,8 +873,10 @@ int ip_append_data(struct sock *sk,
 	    !exthdrlen)
 		csummode = CHECKSUM_PARTIAL;

+	skb = skb_peek_tail(&sk->sk_write_queue);
+
 	inet->cork.length += length;
-	if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
@@ -892,7 +894,7 @@ int ip_append_data(struct sock *sk,
 	 * adding appropriate IP header.
 	 */

-	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
+	if (!skb)
 		goto alloc_new_skb;

 	while (length > 0) {
@@ -1121,7 +1123,8 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 		return -EINVAL;

 	inet->cork.length += size;
-	if ((sk->sk_protocol == IPPROTO_UDP) &&
+	if ((size + skb->len > mtu) &&
+	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
 		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 856123fe32f9..7f6273506eea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 {
 	struct mr_table *mrt, *next;

-	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+		list_del(&mrt->list);
 		kfree(mrt);
+	}
 	fib_rules_unregister(net->ipv4.mr_rules_ops);
 }
 #else
@@ -440,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 	int err;

 	err = ipmr_fib_lookup(net, &fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}

 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
@@ -1726,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
 		goto dont_forward;

 	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-	if (err < 0)
+	if (err < 0) {
+		kfree_skb(skb);
 		return err;
+	}

 	if (!local) {
 		if (IPCB(skb)->opt.router_alert) {
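All three ipmr.c hunks are lifetime fixes: ipmr_rules_exit() now unlinks each
mr_table from the per-namespace list before freeing it, and
reg_vif_xmit()/ip_mr_input() honour the convention that a function which has
taken ownership of an skb must free it on every error return. The error-path
idiom, condensed (sketch):

	err = ipmr_fib_lookup(net, &fl, &mrt);
	if (err < 0) {
		kfree_skb(skb);	/* we own the skb; don't leak it */
		return err;
	}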
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..65afeaec15b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 	ssize_t spliced;
 	int ret;

+	sock_rps_record_flow(sk);
 	/*
 	 * We can't seek on a socket input
 	 */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..7ed9dc1042d1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	int mib_idx;
 	int fwd_rexmitting = 0;

+	if (!tp->packets_out)
+		return;
+
 	if (!tp->lost_out)
 		tp->retransmit_high = tp->snd_una;

diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;

 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..784f34d11fdd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1760,7 +1760,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)

 	idev = ipv6_find_idev(dev);
 	if (!idev)
-		return NULL;
+		return ERR_PTR(-ENOBUFS);
+
+	if (idev->cnf.disable_ipv6)
+		return ERR_PTR(-EACCES);

 	/* Add default multicast route */
 	addrconf_add_mroute(dev);
@@ -2129,8 +2132,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	if (!dev)
 		return -ENODEV;

-	if ((idev = addrconf_add_dev(dev)) == NULL)
-		return -ENOBUFS;
+	idev = addrconf_add_dev(dev);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);

 	scope = ipv6_addr_scope(pfx);

@@ -2377,7 +2381,7 @@ static void addrconf_dev_config(struct net_device *dev)
 	}

 	idev = addrconf_add_dev(dev);
-	if (idev == NULL)
+	if (IS_ERR(idev))
 		return;

 	memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2472,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = addrconf_add_dev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
 		return;
 	}
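addrconf_add_dev() now reports why it failed using the kernel's ERR_PTR
convention: a negative errno is encoded into the pointer value, callers test
it with IS_ERR() and recover the code with PTR_ERR(), instead of collapsing
every failure into NULL/-ENOBUFS. The caller pattern that all three call-site
hunks switch to (sketch):

	struct inet6_dev *idev;

	idev = addrconf_add_dev(dev);
	if (IS_ERR(idev))
		return PTR_ERR(idev);	/* -ENOBUFS or -EACCES */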
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
 		      np->tclass, NULL, &fl, (struct rt6_info*)dst,
 		      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 			np->dontfrag);

 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 073071f2b75b..66078dad7fe8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt);
 static void ipmr_expire_process(unsigned long arg);

 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
-#define ip6mr_for_each_table(mrt, met) \
+#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

 static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
 {
 	struct mr6_table *mrt, *next;

-	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+		list_del(&mrt->list);
 		ip6mr_free_table(mrt);
+	}
 	fib_rules_unregister(net->ipv6.mr6_rules_ops);
 }
 #else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 59f1881968c7..ab1622d7d409 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
 		     IPV6_TLV_PADN, 0 };

 	/* we assume size > sizeof(ra) here */
-	skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err);
+	size += LL_ALLOCATED_SPACE(dev);
+	/* limit our allocations to order-0 page */
+	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
+	skb = sock_alloc_send_skb(sk, size, 1, &err);

 	if (!skb)
 		return NULL;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 2794b6002836..d6e9599d0705 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =

 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
 	int err = rt2->rt_hdr.nexthdr;

 	spin_lock(&x->lock);
-	if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+	if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
 	    !ipv6_addr_any((struct in6_addr *)x->coaddr))
 		err = -ENOENT;
 	spin_unlock(&x->lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..2efef52fb461 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 		src_addr = solicited_addr;
 		if (ifp->flags & IFA_F_OPTIMISTIC)
 			override = 0;
+		inc_opt |= ifp->idev->cnf.force_tllao;
 		in6_ifa_put(ifp);
 	} else {
 		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 	icmp6h.icmp6_solicited = solicited;
 	icmp6h.icmp6_override = override;

-	inc_opt |= ifp->idev->cnf.force_tllao;
 	__ndisc_send(dev, neigh, daddr, src_addr,
 		     &icmp6h, solicited_addr,
 		     inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 	fl.fl_ip_dport = otcph.source;
 	security_skb_classify_flow(oldskb, &fl);
 	dst = ip6_route_output(net, NULL, &fl);
-	if (dst == NULL)
+	if (dst == NULL || dst->error) {
+		dst_release(dst);
 		return;
-	if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0))
+	}
+	if (xfrm_lookup(net, &dst, &fl, NULL, 0))
 		return;

 	hh_len = (dst->dev->hard_header_len + 15)&~15;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];

 	memset(fl, 0, sizeof(struct flowi));
+	fl->mark = skb->mark;
+
 	ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
 	ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);

diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..67ee34f57df7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -632,7 +632,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
 	skb->dev = sta->sdata->dev;
 	skb->protocol = eth_type_trans(skb, sta->sdata->dev);
 	memset(skb->cb, 0, sizeof(skb->cb));
-	netif_rx(skb);
+	netif_rx_ni(skb);
 }

 static void sta_apply_parameters(struct ieee80211_local *local,
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
				 struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
 	/* trace_drv_get_survey(local, idx, survey, ret); */
 	return ret;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0839c4e8fd2e..f803f8b72a93 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | 1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); |
1693 | break; | 1693 | break; |
1694 | case IEEE80211_STYPE_ACTION: | 1694 | case IEEE80211_STYPE_ACTION: |
1695 | if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) | 1695 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1696 | break; | 1732 | break; |
1697 | 1733 | } | |
1698 | ieee80211_sta_process_chanswitch(sdata, | 1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1699 | &mgmt->u.action.u.chan_switch.sw_elem, | 1735 | ieee80211_sta_process_chanswitch(sdata, |
1700 | (void *)ifmgd->associated->priv, | 1736 | &mgmt->u.action.u.chan_switch.sw_elem, |
1701 | rx_status->mactime); | 1737 | (void *)ifmgd->associated->priv, |
1702 | break; | 1738 | rx_status->mactime); |
1739 | break; | ||
1740 | } | ||
1703 | } | 1741 | } |
1704 | mutex_unlock(&ifmgd->mtx); | 1742 | mutex_unlock(&ifmgd->mtx); |
1705 | 1743 | ||
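The new WLAN_CATEGORY_BACK branch above combines three precautions: the station is looked up under rcu_read_lock(), bottom halves are disabled so the aggregation handlers run as they would from softirq context, and each subtype is length-checked before the matching union member is read. A hedged sketch of that last check, with the 24-byte figure and struct layout as illustrative assumptions:

	#define MIN_ACTION_SIZE 24	/* assumption: mgmt header up to the action body */

	struct addba_req {		/* illustrative stand-in, not the real layout */
		unsigned char	action_code;
		unsigned char	dialog_token;
		unsigned short	capab, timeout, start_seq_num;
	};

	/* Only dereference u.action.u.addba_req once the frame covers it. */
	static int frame_has_addba_req(size_t frame_len)
	{
		return frame_len >= MIN_ACTION_SIZE + sizeof(struct addba_req);
	}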
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1722 | mutex_unlock(&ifmgd->mtx); | 1760 | mutex_unlock(&ifmgd->mtx); |
1723 | 1761 | ||
1724 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && | 1762 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && |
1725 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) | 1763 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { |
1726 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1764 | struct ieee80211_local *local = sdata->local; |
1765 | struct ieee80211_work *wk; | ||
1766 | |||
1767 | mutex_lock(&local->work_mtx); | ||
1768 | list_for_each_entry(wk, &local->work_list, list) { | ||
1769 | if (wk->sdata != sdata) | ||
1770 | continue; | ||
1771 | |||
1772 | if (wk->type != IEEE80211_WORK_ASSOC) | ||
1773 | continue; | ||
1774 | |||
1775 | if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) | ||
1776 | continue; | ||
1777 | if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN)) | ||
1778 | continue; | ||
1727 | 1779 | ||
1780 | /* | ||
1781 | * Printing the message only here means we can't | ||
1782 | * spuriously print it, but it also means that it | ||
1783 | * won't be printed when the frame comes in before | ||
1784 | * we even tried to associate or in similar cases. | ||
1785 | * | ||
1786 | * Ultimately, I suspect cfg80211 should print the | ||
1787 | * messages instead. | ||
1788 | */ | ||
1789 | printk(KERN_DEBUG | ||
1790 | "%s: deauthenticated from %pM (Reason: %u)\n", | ||
1791 | sdata->name, mgmt->bssid, | ||
1792 | le16_to_cpu(mgmt->u.deauth.reason_code)); | ||
1793 | |||
1794 | list_del_rcu(&wk->list); | ||
1795 | free_work(wk); | ||
1796 | break; | ||
1797 | } | ||
1798 | mutex_unlock(&local->work_mtx); | ||
1799 | |||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
1801 | } | ||
1728 | out: | 1802 | out: |
1729 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
1730 | } | 1804 | } |
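The deauth hunk above walks local->work_list under work_mtx and frees any pending association attempt toward the BSSID that just deauthenticated the interface, so the queued work cannot fire against a dead association. Breaking out immediately after the removal is what keeps the plain (non-_safe) iterator legal; the shape, sketched with an invented match helper:

	/* Remove at most one matching entry from a mutex-protected list.
	 * list_for_each_entry() must not advance past a list_del(), hence
	 * the immediate break; removing several entries would need
	 * list_for_each_entry_safe() instead. */
	mutex_lock(&local->work_mtx);
	list_for_each_entry(wk, &local->work_list, list) {
		if (wk->sdata != sdata || !addr_match(wk->filter_ta, bssid))
			continue;
		list_del_rcu(&wk->list);
		free_work(wk);
		break;			/* iterator is invalid from here on */
	}
	mutex_unlock(&local->work_mtx);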
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5e0b65406c44..be9abc2e6348 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1944,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1944 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1944 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1945 | break; | 1945 | break; |
1946 | 1946 | ||
1947 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1948 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1949 | |||
1947 | switch (mgmt->u.action.u.addba_req.action_code) { | 1950 | switch (mgmt->u.action.u.addba_req.action_code) { |
1948 | case WLAN_ACTION_ADDBA_REQ: | 1951 | case WLAN_ACTION_ADDBA_REQ: |
1949 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1952 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index be3d4a698692..b025dc7bb0fd 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, | |||
715 | struct ieee80211_rx_status *rx_status; | 715 | struct ieee80211_rx_status *rx_status; |
716 | struct ieee80211_mgmt *mgmt; | 716 | struct ieee80211_mgmt *mgmt; |
717 | struct ieee80211_work *wk; | 717 | struct ieee80211_work *wk; |
718 | enum work_action rma; | 718 | enum work_action rma = WORK_ACT_NONE; |
719 | u16 fc; | 719 | u16 fc; |
720 | 720 | ||
721 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 721 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
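The one-line work.c change initializes rma because not every management subtype assigns it before the verdict is consumed; without the default, the code could branch on an uninitialized enum. Sketched with invented dispatch names:

	/* Give the verdict a safe default so subtypes with no handler fall
	 * through to "do nothing" rather than reading an indeterminate value. */
	enum work_action rma = WORK_ACT_NONE;

	switch (fc & FCTL_STYPE) {		/* illustrative dispatch */
	case STYPE_PROBE_RESP:
		rma = rx_probe_resp(wk, mgmt, len);
		break;
	/* subtypes without a case leave rma == WORK_ACT_NONE */
	}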
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index d8f7e8ef67b4..ff04e9edbed6 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
@@ -162,6 +162,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) | |||
162 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); | 162 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
163 | 163 | ||
164 | ct_write_lock(hash); | 164 | ct_write_lock(hash); |
165 | spin_lock(&cp->lock); | ||
165 | 166 | ||
166 | if (!(cp->flags & IP_VS_CONN_F_HASHED)) { | 167 | if (!(cp->flags & IP_VS_CONN_F_HASHED)) { |
167 | list_add(&cp->c_list, &ip_vs_conn_tab[hash]); | 168 | list_add(&cp->c_list, &ip_vs_conn_tab[hash]); |
@@ -174,6 +175,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) | |||
174 | ret = 0; | 175 | ret = 0; |
175 | } | 176 | } |
176 | 177 | ||
178 | spin_unlock(&cp->lock); | ||
177 | ct_write_unlock(hash); | 179 | ct_write_unlock(hash); |
178 | 180 | ||
179 | return ret; | 181 | return ret; |
@@ -193,6 +195,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
193 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); | 195 | hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); |
194 | 196 | ||
195 | ct_write_lock(hash); | 197 | ct_write_lock(hash); |
198 | spin_lock(&cp->lock); | ||
196 | 199 | ||
197 | if (cp->flags & IP_VS_CONN_F_HASHED) { | 200 | if (cp->flags & IP_VS_CONN_F_HASHED) { |
198 | list_del(&cp->c_list); | 201 | list_del(&cp->c_list); |
@@ -202,6 +205,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
202 | } else | 205 | } else |
203 | ret = 0; | 206 | ret = 0; |
204 | 207 | ||
208 | spin_unlock(&cp->lock); | ||
205 | ct_write_unlock(hash); | 209 | ct_write_unlock(hash); |
206 | 210 | ||
207 | return ret; | 211 | return ret; |
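Both IPVS hunks nest the per-connection spinlock inside the hash-bucket lock, so that IP_VS_CONN_F_HASHED and the connection's list membership change atomically with respect to other paths updating cp->flags under cp->lock. Keeping one lock order everywhere (bucket lock first, then entry lock) is what keeps this deadlock-free; sketched with trimmed names:

	ct_write_lock(hash);		/* protects the bucket list */
	spin_lock(&cp->lock);		/* protects cp->flags */

	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
		list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
		cp->flags |= IP_VS_CONN_F_HASHED;
	}

	spin_unlock(&cp->lock);		/* release in reverse order */
	ct_write_unlock(hash);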
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 94d72e85a475..b2a3ae6cad78 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) | |||
698 | newsk = NULL; | 698 | newsk = NULL; |
699 | goto out; | 699 | goto out; |
700 | } | 700 | } |
701 | kfree_skb(oskb); | ||
701 | 702 | ||
702 | sock_hold(sk); | 703 | sock_hold(sk); |
703 | pep_sk(newsk)->listener = sk; | 704 | pep_sk(newsk)->listener = sk; |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index c0b6863e3b87..1980b71c283f 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -33,6 +33,7 @@ | |||
33 | static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; | 33 | static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; |
34 | static u32 mirred_idx_gen; | 34 | static u32 mirred_idx_gen; |
35 | static DEFINE_RWLOCK(mirred_lock); | 35 | static DEFINE_RWLOCK(mirred_lock); |
36 | static LIST_HEAD(mirred_list); | ||
36 | 37 | ||
37 | static struct tcf_hashinfo mirred_hash_info = { | 38 | static struct tcf_hashinfo mirred_hash_info = { |
38 | .htab = tcf_mirred_ht, | 39 | .htab = tcf_mirred_ht, |
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) | |||
47 | m->tcf_bindcnt--; | 48 | m->tcf_bindcnt--; |
48 | m->tcf_refcnt--; | 49 | m->tcf_refcnt--; |
49 | if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { | 50 | if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { |
50 | dev_put(m->tcfm_dev); | 51 | list_del(&m->tcfm_list); |
52 | if (m->tcfm_dev) | ||
53 | dev_put(m->tcfm_dev); | ||
51 | tcf_hash_destroy(&m->common, &mirred_hash_info); | 54 | tcf_hash_destroy(&m->common, &mirred_hash_info); |
52 | return 1; | 55 | return 1; |
53 | } | 56 | } |
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est, | |||
134 | m->tcfm_ok_push = ok_push; | 137 | m->tcfm_ok_push = ok_push; |
135 | } | 138 | } |
136 | spin_unlock_bh(&m->tcf_lock); | 139 | spin_unlock_bh(&m->tcf_lock); |
137 | if (ret == ACT_P_CREATED) | 140 | if (ret == ACT_P_CREATED) { |
141 | list_add(&m->tcfm_list, &mirred_list); | ||
138 | tcf_hash_insert(pc, &mirred_hash_info); | 142 | tcf_hash_insert(pc, &mirred_hash_info); |
143 | } | ||
139 | 144 | ||
140 | return ret; | 145 | return ret; |
141 | } | 146 | } |
@@ -162,9 +167,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
162 | m->tcf_tm.lastuse = jiffies; | 167 | m->tcf_tm.lastuse = jiffies; |
163 | 168 | ||
164 | dev = m->tcfm_dev; | 169 | dev = m->tcfm_dev; |
170 | if (!dev) { | ||
171 | printk_once(KERN_NOTICE "tc mirred: target device is gone\n"); | ||
172 | goto out; | ||
173 | } | ||
174 | |||
165 | if (!(dev->flags & IFF_UP)) { | 175 | if (!(dev->flags & IFF_UP)) { |
166 | if (net_ratelimit()) | 176 | if (net_ratelimit()) |
167 | pr_notice("tc mirred to Houston: device %s is gone!\n", | 177 | pr_notice("tc mirred to Houston: device %s is down\n", |
168 | dev->name); | 178 | dev->name); |
169 | goto out; | 179 | goto out; |
170 | } | 180 | } |
@@ -232,6 +242,28 @@ nla_put_failure: | |||
232 | return -1; | 242 | return -1; |
233 | } | 243 | } |
234 | 244 | ||
245 | static int mirred_device_event(struct notifier_block *unused, | ||
246 | unsigned long event, void *ptr) | ||
247 | { | ||
248 | struct net_device *dev = ptr; | ||
249 | struct tcf_mirred *m; | ||
250 | |||
251 | if (event == NETDEV_UNREGISTER) | ||
252 | list_for_each_entry(m, &mirred_list, tcfm_list) { | ||
253 | if (m->tcfm_dev == dev) { | ||
254 | dev_put(dev); | ||
255 | m->tcfm_dev = NULL; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | return NOTIFY_DONE; | ||
260 | } | ||
261 | |||
262 | static struct notifier_block mirred_device_notifier = { | ||
263 | .notifier_call = mirred_device_event, | ||
264 | }; | ||
265 | |||
266 | |||
235 | static struct tc_action_ops act_mirred_ops = { | 267 | static struct tc_action_ops act_mirred_ops = { |
236 | .kind = "mirred", | 268 | .kind = "mirred", |
237 | .hinfo = &mirred_hash_info, | 269 | .hinfo = &mirred_hash_info, |
@@ -252,12 +284,17 @@ MODULE_LICENSE("GPL"); | |||
252 | 284 | ||
253 | static int __init mirred_init_module(void) | 285 | static int __init mirred_init_module(void) |
254 | { | 286 | { |
287 | int err = register_netdevice_notifier(&mirred_device_notifier); | ||
288 | if (err) | ||
289 | return err; | ||
290 | |||
255 | pr_info("Mirror/redirect action on\n"); | 291 | pr_info("Mirror/redirect action on\n"); |
256 | return tcf_register_action(&act_mirred_ops); | 292 | return tcf_register_action(&act_mirred_ops); |
257 | } | 293 | } |
258 | 294 | ||
259 | static void __exit mirred_cleanup_module(void) | 295 | static void __exit mirred_cleanup_module(void) |
260 | { | 296 | { |
297 | unregister_netdevice_notifier(&mirred_device_notifier); | ||
261 | tcf_unregister_action(&act_mirred_ops); | 298 | tcf_unregister_action(&act_mirred_ops); |
262 | } | 299 | } |
263 | 300 | ||
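The act_mirred changes keep every mirred action on a list and register a netdevice notifier: when the target device unregisters, its reference is dropped and tcfm_dev is cleared, and the fast path now treats a NULL device as "target is gone" instead of chasing a stale pointer. A minimal notifier sketch (the void *ptr carrying the net_device directly matches this kernel era; the rest is invented):

	static int my_device_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;
		struct my_action *a;

		if (event == NETDEV_UNREGISTER)
			list_for_each_entry(a, &my_action_list, node)
				if (a->dev == dev) {
					dev_put(dev);	/* drop our reference */
					a->dev = NULL;	/* fast path checks this */
				}

		return NOTIFY_DONE;
	}

	static struct notifier_block my_notifier = {
		.notifier_call = my_device_event,
	};
	/* register_netdevice_notifier(&my_notifier) in init, and
	 * unregister_netdevice_notifier(&my_notifier) on exit. */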
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 570949417f38..724553e8ed7b 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
205 | { | 205 | { |
206 | struct icmphdr *icmph; | 206 | struct icmphdr *icmph; |
207 | 207 | ||
208 | if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) | 208 | if (!pskb_may_pull(skb, ihl + sizeof(*icmph))) |
209 | goto drop; | 209 | goto drop; |
210 | 210 | ||
211 | icmph = (void *)(skb_network_header(skb) + ihl); | 211 | icmph = (void *)(skb_network_header(skb) + ihl); |
@@ -215,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
215 | (icmph->type != ICMP_PARAMETERPROB)) | 215 | (icmph->type != ICMP_PARAMETERPROB)) |
216 | break; | 216 | break; |
217 | 217 | ||
218 | if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) | ||
219 | goto drop; | ||
220 | |||
218 | iph = (void *)(icmph + 1); | 221 | iph = (void *)(icmph + 1); |
219 | if (egress) | 222 | if (egress) |
220 | addr = iph->daddr; | 223 | addr = iph->daddr; |
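The act_nat fix splits one oversized pskb_may_pull() in two so each pull covers exactly what is read next: the ICMP header is required unconditionally, but the embedded IP header is demanded only for the error types that actually carry one. The old single pull would, presumably, also have dropped legitimate short ICMP messages such as echo requests. Sketched with the same call shape:

	static int nat_icmp_sketch(struct sk_buff *skb, int ihl)
	{
		struct icmphdr *icmph;
		struct iphdr *iph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
			return -1;			/* drop */
		icmph = (void *)(skb_network_header(skb) + ihl);

		if (icmph->type != ICMP_DEST_UNREACH &&
		    icmph->type != ICMP_TIME_EXCEEDED &&
		    icmph->type != ICMP_PARAMETERPROB)
			return 0;			/* nothing embedded */

		/* Only error messages carry the offending IP header. */
		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
			return -1;
		icmph = (void *)(skb_network_header(skb) + ihl);	/* re-read: pull may relocate data */
		iph = (void *)(icmph + 1);
		(void)iph;		/* address rewrite would happen here */
		return 0;
	}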
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 3415b6ce1c0a..807643bdcbac 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev) | |||
449 | dev->tx_queue_len = 100; | 449 | dev->tx_queue_len = 100; |
450 | dev->flags = IFF_NOARP; | 450 | dev->flags = IFF_NOARP; |
451 | dev->hard_header_len = LL_MAX_HEADER; | 451 | dev->hard_header_len = LL_MAX_HEADER; |
452 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
452 | } | 453 | } |
453 | 454 | ||
454 | static LIST_HEAD(master_dev_list); | 455 | static LIST_HEAD(master_dev_list); |
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 73affb8624fa..8dc47f1d0001 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -267,7 +267,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) | |||
267 | * Run memory cache shrinker. | 267 | * Run memory cache shrinker. |
268 | */ | 268 | */ |
269 | static int | 269 | static int |
270 | rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | 270 | rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
271 | { | 271 | { |
272 | LIST_HEAD(free); | 272 | LIST_HEAD(free); |
273 | int res; | 273 | int res; |
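The sunrpc hunk tracks a shrinker API change: the callback now receives its own struct shrinker, which lets one function serve several caches via container_of(). A registration sketch in the shape of this kernel era, names invented:

	static int my_cache_shrink(struct shrinker *shrink,
				   int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan)
			my_cache_evict(nr_to_scan);	/* free some entries */
		return my_cache_count();		/* objects still cached */
	}

	static struct shrinker my_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};
	/* register_shrinker(&my_shrinker) at init,
	 * unregister_shrinker(&my_shrinker) at teardown. */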
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2a9675136c68..7ca65c7005ea 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -210,7 +210,8 @@ struct sock_xprt { | |||
210 | * State of TCP reply receive | 210 | * State of TCP reply receive |
211 | */ | 211 | */ |
212 | __be32 tcp_fraghdr, | 212 | __be32 tcp_fraghdr, |
213 | tcp_xid; | 213 | tcp_xid, |
214 | tcp_calldir; | ||
214 | 215 | ||
215 | u32 tcp_offset, | 216 | u32 tcp_offset, |
216 | tcp_reclen; | 217 | tcp_reclen; |
@@ -927,7 +928,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, | |||
927 | { | 928 | { |
928 | size_t len, used; | 929 | size_t len, used; |
929 | u32 offset; | 930 | u32 offset; |
930 | __be32 calldir; | 931 | char *p; |
931 | 932 | ||
932 | /* | 933 | /* |
933 | * We want transport->tcp_offset to be 8 at the end of this routine | 934 | * We want transport->tcp_offset to be 8 at the end of this routine |
@@ -936,26 +937,33 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, | |||
936 | * transport->tcp_offset is 4 (after having already read the xid). | 937 | * transport->tcp_offset is 4 (after having already read the xid). |
937 | */ | 938 | */ |
938 | offset = transport->tcp_offset - sizeof(transport->tcp_xid); | 939 | offset = transport->tcp_offset - sizeof(transport->tcp_xid); |
939 | len = sizeof(calldir) - offset; | 940 | len = sizeof(transport->tcp_calldir) - offset; |
940 | dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len); | 941 | dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len); |
941 | used = xdr_skb_read_bits(desc, &calldir, len); | 942 | p = ((char *) &transport->tcp_calldir) + offset; |
943 | used = xdr_skb_read_bits(desc, p, len); | ||
942 | transport->tcp_offset += used; | 944 | transport->tcp_offset += used; |
943 | if (used != len) | 945 | if (used != len) |
944 | return; | 946 | return; |
945 | transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR; | 947 | transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR; |
946 | transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; | ||
947 | transport->tcp_flags |= TCP_RCV_COPY_DATA; | ||
948 | /* | 948 | /* |
949 | * We don't yet have the XDR buffer, so we will write the calldir | 949 | * We don't yet have the XDR buffer, so we will write the calldir |
950 | * out after we get the buffer from the 'struct rpc_rqst' | 950 | * out after we get the buffer from the 'struct rpc_rqst' |
951 | */ | 951 | */ |
952 | if (ntohl(calldir) == RPC_REPLY) | 952 | switch (ntohl(transport->tcp_calldir)) { |
953 | case RPC_REPLY: | ||
954 | transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; | ||
955 | transport->tcp_flags |= TCP_RCV_COPY_DATA; | ||
953 | transport->tcp_flags |= TCP_RPC_REPLY; | 956 | transport->tcp_flags |= TCP_RPC_REPLY; |
954 | else | 957 | break; |
958 | case RPC_CALL: | ||
959 | transport->tcp_flags |= TCP_RCV_COPY_CALLDIR; | ||
960 | transport->tcp_flags |= TCP_RCV_COPY_DATA; | ||
955 | transport->tcp_flags &= ~TCP_RPC_REPLY; | 961 | transport->tcp_flags &= ~TCP_RPC_REPLY; |
956 | dprintk("RPC: reading %s CALL/REPLY flag %08x\n", | 962 | break; |
957 | (transport->tcp_flags & TCP_RPC_REPLY) ? | 963 | default: |
958 | "reply for" : "request with", calldir); | 964 | dprintk("RPC: invalid request message type\n"); |
965 | xprt_force_disconnect(&transport->xprt); | ||
966 | } | ||
959 | xs_tcp_check_fraghdr(transport); | 967 | xs_tcp_check_fraghdr(transport); |
960 | } | 968 | } |
961 | 969 | ||
@@ -975,12 +983,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt, | |||
975 | /* | 983 | /* |
976 | * Save the RPC direction in the XDR buffer | 984 | * Save the RPC direction in the XDR buffer |
977 | */ | 985 | */ |
978 | __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ? | ||
979 | htonl(RPC_REPLY) : 0; | ||
980 | |||
981 | memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied, | 986 | memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied, |
982 | &calldir, sizeof(calldir)); | 987 | &transport->tcp_calldir, |
983 | transport->tcp_copied += sizeof(calldir); | 988 | sizeof(transport->tcp_calldir)); |
989 | transport->tcp_copied += sizeof(transport->tcp_calldir); | ||
984 | transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR; | 990 | transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR; |
985 | } | 991 | } |
986 | 992 | ||
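The xprtsock changes all follow from one fact: the four-byte call-direction word can straddle TCP segments. It therefore moves from a stack variable into struct sock_xprt, and every invocation resumes copying at the offset already consumed; the later hunk then copies the stored wire-format value straight into the XDR buffer instead of reconstructing it from flag bits. The resume arithmetic, sketched (read_bits() stands in for xdr_skb_read_bits):

	offset = t->tcp_offset - sizeof(t->tcp_xid);	/* bytes of calldir already read */
	len    = sizeof(t->tcp_calldir) - offset;	/* bytes still missing */
	p      = (char *)&t->tcp_calldir + offset;	/* resume point */

	used = read_bits(desc, p, len);
	t->tcp_offset += used;
	if (used != len)
		return;		/* wait for the next segment; state persists */
	/* field complete: safe to ntohl() and dispatch on it */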
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4bf27d901333..a7ec5a8a2380 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, | |||
1594 | 1594 | ||
1595 | /* Try to instantiate a bundle */ | 1595 | /* Try to instantiate a bundle */ |
1596 | err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); | 1596 | err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); |
1597 | if (err < 0) { | 1597 | if (err <= 0) { |
1598 | if (err != -EAGAIN) | 1598 | if (err != 0 && err != -EAGAIN) |
1599 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); | 1599 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); |
1600 | return ERR_PTR(err); | 1600 | return ERR_PTR(err); |
1601 | } | 1601 | } |
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir, | |||
1678 | goto make_dummy_bundle; | 1678 | goto make_dummy_bundle; |
1679 | dst_hold(&xdst->u.dst); | 1679 | dst_hold(&xdst->u.dst); |
1680 | return oldflo; | 1680 | return oldflo; |
1681 | } else if (new_xdst == NULL) { | ||
1682 | num_xfrms = 0; | ||
1683 | if (oldflo == NULL) | ||
1684 | goto make_dummy_bundle; | ||
1685 | xdst->num_xfrms = 0; | ||
1686 | dst_hold(&xdst->u.dst); | ||
1687 | return oldflo; | ||
1681 | } | 1688 | } |
1682 | 1689 | ||
1683 | /* Kill the previous bundle */ | 1690 | /* Kill the previous bundle */ |
@@ -1760,6 +1767,10 @@ restart: | |||
1760 | xfrm_pols_put(pols, num_pols); | 1767 | xfrm_pols_put(pols, num_pols); |
1761 | err = PTR_ERR(xdst); | 1768 | err = PTR_ERR(xdst); |
1762 | goto dropdst; | 1769 | goto dropdst; |
1770 | } else if (xdst == NULL) { | ||
1771 | num_xfrms = 0; | ||
1772 | drop_pols = num_pols; | ||
1773 | goto no_transform; | ||
1763 | } | 1774 | } |
1764 | 1775 | ||
1765 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); | 1776 | spin_lock_bh(&xfrm_policy_sk_bundle_lock); |
@@ -2300,7 +2311,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
2300 | return 0; | 2311 | return 0; |
2301 | if (xdst->xfrm_genid != dst->xfrm->genid) | 2312 | if (xdst->xfrm_genid != dst->xfrm->genid) |
2302 | return 0; | 2313 | return 0; |
2303 | if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | 2314 | if (xdst->num_pols > 0 && |
2315 | xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) | ||
2304 | return 0; | 2316 | return 0; |
2305 | 2317 | ||
2306 | if (strict && fl && | 2318 | if (strict && fl && |
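The xfrm_policy fixes lean on one return convention: the template resolver returns a negative errno on failure, zero when no transforms apply, or the number of transforms. Because ERR_PTR(0) is NULL, callers grow explicit NULL branches (the new "else if (... == NULL)" blocks above) and must not count the zero case as a policy error. Sketched:

	/* Three-way contract: < 0 hard failure, == 0 no transforms,
	 * > 0 transform count. Names invented. */
	err = resolve_templates(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			stats_inc(net, POLERROR);	/* real errors only */
		return ERR_PTR(err);			/* ERR_PTR(0) == NULL */
	}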