Diffstat (limited to 'net')
 65 files changed, 465 insertions(+), 355 deletions(-)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fbd0acf80b13..2fdebabbfacd 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
 		release_sock(sk);
 		ax25_disconnect(ax25, 0);
 		lock_sock(sk);
-		ax25_destroy_socket(ax25);
+		if (!sock_flag(ax25->sk, SOCK_DESTROY))
+			ax25_destroy_socket(ax25);
 		break;

 	case AX25_STATE_3:
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 951cd57bb07d..5237dff6941d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
 	switch (ax25->state) {

 	case AX25_STATE_0:
+	case AX25_STATE_2:
 		/* Magic here: If we listen() and a new link dies before it
 		   is accepted() it isn't 'dead' so doesn't get removed. */
 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
 			sock_hold(sk);
 			ax25_destroy_socket(ax25);
 			bh_unlock_sock(sk);
+			/* Ungrab socket and destroy it */
 			sock_put(sk);
 		} else
 			ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
 	case AX25_STATE_2:
 		if (ax25->n2count == ax25->n2) {
 			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-			ax25_disconnect(ax25, ETIMEDOUT);
+			if (!sock_flag(ax25->sk, SOCK_DESTROY))
+				ax25_disconnect(ax25, ETIMEDOUT);
 			return;
 		} else {
 			ax25->n2count++;
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 004467c9e6e1..2c0d6ef66f9d 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)

 	switch (ax25->state) {
 	case AX25_STATE_0:
+	case AX25_STATE_2:
 		/* Magic here: If we listen() and a new link dies before it
 		   is accepted() it isn't 'dead' so doesn't get removed. */
 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
 			sock_hold(sk);
 			ax25_destroy_socket(ax25);
 			bh_unlock_sock(sk);
+			/* Ungrab socket and destroy it */
 			sock_put(sk);
 		} else
 			ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
 	case AX25_STATE_2:
 		if (ax25->n2count == ax25->n2) {
 			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-			ax25_disconnect(ax25, ETIMEDOUT);
+			if (!sock_flag(ax25->sk, SOCK_DESTROY))
+				ax25_disconnect(ax25, ETIMEDOUT);
 			return;
 		} else {
 			ax25->n2count++;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3b78e8473a01..655a7d4c96e1 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
 	ax25_clear_queues(ax25);

-	ax25_stop_heartbeat(ax25);
+	if (!sock_flag(ax25->sk, SOCK_DESTROY))
+		ax25_stop_heartbeat(ax25);
 	ax25_stop_t1timer(ax25);
 	ax25_stop_t2timer(ax25);
 	ax25_stop_t3timer(ax25);
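The four AX.25 hunks above share one idea: once a socket is marked SOCK_DESTROY, exactly one path may tear the ax25_cb down, and ax25_disconnect() must not stop the heartbeat timer that the pending destroy still relies on. A minimal sketch of the gating pattern (illustrative only, not the kernel flow; sock_flag() is the real include/net/sock.h helper, teardown_once() is a made-up name):

	#include <net/sock.h>	/* sock_flag() */

	/* Illustrative teardown gate: if SOCK_DESTROY is already set,
	 * another path (the heartbeat timer) owns the final destroy. */
	static void teardown_once(struct sock *sk, ax25_cb *ax25)
	{
		if (!sock_flag(sk, SOCK_DESTROY))
			ax25_destroy_socket(ax25);	/* we destroy it here */
		/* else: leave it to the already-scheduled destroy path */
	}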
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f75091c983ee..396c0134c5ab 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -374,6 +374,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
 	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;

+	ethhdr = eth_hdr(skb);
 	icmph = (struct batadv_icmp_header *)skb->data;
 	icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
 	if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 343d2c904399..287a3879ed7e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1033,7 +1033,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
 					  struct list_head *head)
 {
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_hard_iface *hard_iface;
+	struct batadv_softif_vlan *vlan;

 	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
 		if (hard_iface->soft_iface == soft_iface)
@@ -1041,6 +1043,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
 						BATADV_IF_CLEANUP_KEEP);
 	}

+	/* destroy the "untagged" VLAN */
+	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+	if (vlan) {
+		batadv_softif_destroy_vlan(bat_priv, vlan);
+		batadv_softif_vlan_put(vlan);
+	}
+
 	batadv_sysfs_del_meshif(soft_iface);
 	unregister_netdevice_queue(soft_iface, head);
 }
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index feaf492b01ca..57ec87f37050 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -650,8 +650,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,

 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
-	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-		 addr, BATADV_PRINT_VID(vid))) {
+	if (!vlan) {
+		net_ratelimited_function(batadv_info, soft_iface,
+					 "adding TT local entry %pM to non-existent VLAN %d\n",
+					 addr, BATADV_PRINT_VID(vid));
 		kfree(tt_local);
 		tt_local = NULL;
 		goto out;
@@ -691,7 +693,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 	if (unlikely(hash_added != 0)) {
 		/* remove the reference for the hash */
 		batadv_tt_local_entry_put(tt_local);
-		batadv_softif_vlan_put(vlan);
 		goto out;
 	}

@@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 	return crc;
 }

+/**
+ * batadv_tt_req_node_release - free tt_req node entry
+ * @ref: kref pointer of the tt req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+	struct batadv_tt_req_node *tt_req_node;
+
+	tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+	kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ *  possibly release it
+ * @tt_req_node: tt_req_node to be free'd
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+	kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_tt_req_node *node;
@@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)

 	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		hlist_del_init(&node->list);
-		kfree(node);
+		batadv_tt_req_node_put(node);
 	}

 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 		if (batadv_has_timed_out(node->issued_at,
 					 BATADV_TT_REQUEST_TIMEOUT)) {
 			hlist_del_init(&node->list);
-			kfree(node);
+			batadv_tt_req_node_put(node);
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 	if (!tt_req_node)
 		goto unlock;

+	kref_init(&tt_req_node->refcount);
 	ether_addr_copy(tt_req_node->addr, orig_node->orig);
 	tt_req_node->issued_at = jiffies;

+	kref_get(&tt_req_node->refcount);
 	hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
+
 	if (ret && tt_req_node) {
 		spin_lock_bh(&bat_priv->tt.req_list_lock);
-		/* hlist_del_init() verifies tt_req_node still is in the list */
-		hlist_del_init(&tt_req_node->list);
+		if (!hlist_unhashed(&tt_req_node->list)) {
+			hlist_del_init(&tt_req_node->list);
+			batadv_tt_req_node_put(tt_req_node);
+		}
 		spin_unlock_bh(&bat_priv->tt.req_list_lock);
-		kfree(tt_req_node);
 	}
+
+	if (tt_req_node)
+		batadv_tt_req_node_put(tt_req_node);
+
 	kfree(tvlv_tt_data);
 	return ret;
 }
@@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
 		if (!batadv_compare_eth(node->addr, resp_src))
 			continue;
 		hlist_del_init(&node->list);
-		kfree(node);
+		batadv_tt_req_node_put(node);
 	}

 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 6a577f4f8ba7..ba846b078af8 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1137,11 +1137,13 @@ struct batadv_tt_change_node {
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
  * @addr: mac address address of the originator this request was sent to
  * @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
  * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
 	u8 addr[ETH_ALEN];
 	unsigned long issued_at;
+	struct kref refcount;
 	struct hlist_node list;
 };

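The translation-table conversion above is the standard kernel kref pattern: the object embeds a struct kref, kref_init() gives the creator reference #1, each additional holder (here, membership in tt.req_list) takes kref_get(), and every kref_put() names a release callback that frees the object once the count reaches zero. A condensed sketch of the same pattern on a hypothetical struct foo (foo_* names are invented for illustration):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref refcount;
		/* payload ... */
	};

	static void foo_release(struct kref *ref)
	{
		/* runs exactly once, when the last reference is dropped */
		kfree(container_of(ref, struct foo, refcount));
	}

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			kref_init(&f->refcount);	/* creator holds ref #1 */
		return f;
	}

	/* extra holder:   kref_get(&f->refcount);
	 * holder is done: kref_put(&f->refcount, foo_release); */

Note the kref_get() right before hlist_add_head() in batadv_tt_req_node_new(): the list owns its own reference, so batadv_send_tt_request() can drop the creator's reference unconditionally without freeing a node that is still queued.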
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 160797722228..43d2cd862bc2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -213,8 +213,7 @@ drop:
 }
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);

-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
 	u16 vid = 0;
@@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 	/* check if vlan is allowed, to avoid spoofing */
 	if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+	__br_handle_local_finish(skb);

 	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
 	br_pass_frame_up(skb);
@@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 		if (p->br->stp_enabled == BR_NO_STP ||
 		    fwd_mask & (1u << dest[5]))
 			goto forward;
-		break;
+		*pskb = skb;
+		__br_handle_local_finish(skb);
+		return RX_HANDLER_PASS;

 	case 0x01: /* IEEE MAC (Pause) */
 		goto drop;
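For context: br_handle_frame() is the bridge's rx_handler, and RX_HANDLER_PASS tells __netif_receive_skb_core() that the skb was not consumed and should continue up the normal receive path on the ingress port device. Link-local 01:80:C2:00:00:0X frames that the bridge does not forward are now learned from via the new __br_handle_local_finish() helper and then handed back to the stack, instead of taking the old break into the bridge's local-input delivery. A sketch of the rx_handler contract this relies on (want_stack_delivery() is a made-up predicate; the result codes are the real netdevice.h ones):

	/* simplified rx_handler shape */
	rx_handler_result_t my_handler(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		if (want_stack_delivery(skb)) {
			*pskb = skb;		/* hand the skb back unchanged */
			return RX_HANDLER_PASS;	/* continue normal rx processing */
		}
		consume_skb(skb);
		return RX_HANDLER_CONSUMED;	/* we took ownership of the skb */
	}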
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6852f3c7009c..43844144c9c4 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
 			       &ip6h->saddr)) {
 		kfree_skb(skb);
+		br->has_ipv6_addr = 0;
 		return NULL;
 	}
+
+	br->has_ipv6_addr = 1;
 	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

 	hopopt = (u8 *)(ip6h + 1);
@@ -1745,6 +1748,7 @@ void br_multicast_init(struct net_bridge *br)
 	br->ip6_other_query.delay_time = 0;
 	br->ip6_querier.port = NULL;
 #endif
+	br->has_ipv6_addr = 1;

 	spin_lock_init(&br->multicast_lock);
 	setup_timer(&br->multicast_router_timer,
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a5343c7232bf..85e89f693589 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1273,7 +1273,7 @@ static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
 		struct bridge_vlan_xstats vxi;
 		struct br_vlan_stats stats;

-		if (vl_idx++ < *prividx)
+		if (++vl_idx < *prividx)
 			continue;
 		memset(&vxi, 0, sizeof(vxi));
 		vxi.vid = v->vid;
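The one-character change above fixes the resume logic for interrupted netlink dumps: with the postfix form, the saved resume index causes one extra entry to be skipped on the next pass, so the VLAN entry on which the previous dump stopped is silently dropped. A small standalone illustration of the off-by-one (plain C, with a saved resume index of 3):

	#include <stdio.h>

	int main(void)
	{
		int prividx = 3, vl_idx, i;

		vl_idx = 0;
		printf("postfix dumps:");
		for (i = 1; i <= 5; i++)	/* entries 1..5 */
			if (!(vl_idx++ < prividx))
				printf(" %d", i);	/* prints 4 5: entry 3 lost */

		vl_idx = 0;
		printf("\nprefix dumps: ");
		for (i = 1; i <= 5; i++)
			if (!(++vl_idx < prividx))
				printf(" %d", i);	/* prints 3 4 5: entry 3 retried */
		printf("\n");
		return 0;
	}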
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c7fb5d7a7218..52edecf3c294 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -314,6 +314,7 @@ struct net_bridge
 	u8 multicast_disabled:1;
 	u8 multicast_querier:1;
 	u8 multicast_query_use_ifaddr:1;
+	u8 has_ipv6_addr:1;

 	u32 hash_elasticity;
 	u32 hash_max;
@@ -588,10 +589,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)

 static inline bool
 __br_multicast_querier_exists(struct net_bridge *br,
-			      struct bridge_mcast_other_query *querier)
+			      struct bridge_mcast_other_query *querier,
+			      const bool is_ipv6)
 {
+	bool own_querier_enabled;
+
+	if (br->multicast_querier) {
+		if (is_ipv6 && !br->has_ipv6_addr)
+			own_querier_enabled = false;
+		else
+			own_querier_enabled = true;
+	} else {
+		own_querier_enabled = false;
+	}
+
 	return time_is_before_jiffies(querier->delay_time) &&
-	       (br->multicast_querier || timer_pending(&querier->timer));
+	       (own_querier_enabled || timer_pending(&querier->timer));
 }

 static inline bool br_multicast_querier_exists(struct net_bridge *br,
@@ -599,10 +612,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 {
 	switch (eth->h_proto) {
 	case (htons(ETH_P_IP)):
-		return __br_multicast_querier_exists(br, &br->ip4_other_query);
+		return __br_multicast_querier_exists(br,
+			&br->ip4_other_query, false);
#if IS_ENABLED(CONFIG_IPV6)
 	case (htons(ETH_P_IPV6)):
-		return __br_multicast_querier_exists(br, &br->ip6_other_query);
+		return __br_multicast_querier_exists(br,
+			&br->ip6_other_query, true);
#endif
 	default:
 		return false;
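The expanded helper reduces to a one-line predicate: the bridge only counts itself as a querier when, in the IPv6 case, it actually holds a usable IPv6 source address (has_ipv6_addr is cleared in br_ip6_multicast_alloc_query() above when ipv6_dev_get_saddr() fails). Condensed, the new logic is simply:

	/* own_querier_enabled as computed above, in one expression */
	own_querier_enabled = br->multicast_querier &&
			      !(is_ipv6 && !br->has_ipv6_addr);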
diff --git a/net/core/filter.c b/net/core/filter.c
index df6860c85d72..cb9fc16cac46 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2105,7 +2105,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 }

 static bool sk_filter_is_valid_access(int off, int size,
-				      enum bpf_access_type type)
+				      enum bpf_access_type type,
+				      enum bpf_reg_type *reg_type)
 {
 	switch (off) {
 	case offsetof(struct __sk_buff, tc_classid):
@@ -2128,7 +2129,8 @@ static bool sk_filter_is_valid_access(int off, int size,
 }

 static bool tc_cls_act_is_valid_access(int off, int size,
-				       enum bpf_access_type type)
+				       enum bpf_access_type type,
+				       enum bpf_reg_type *reg_type)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -2143,6 +2145,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 			return false;
 		}
 	}
+
+	switch (off) {
+	case offsetof(struct __sk_buff, data):
+		*reg_type = PTR_TO_PACKET;
+		break;
+	case offsetof(struct __sk_buff, data_end):
+		*reg_type = PTR_TO_PACKET_END;
+		break;
+	}
+
 	return __is_valid_access(off, size, type);
 }

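Reporting reg_type from the is_valid_access callbacks is what lets the verifier treat loads of __sk_buff's data and data_end as packet pointers and enforce bounds checks on direct packet access. From the program side, the resulting contract looks like the sketch below (restricted BPF C for a tc classifier; TC_ACT_OK/TC_ACT_SHOT and struct __sk_buff are the real UAPI names):

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <linux/if_ether.h>

	/* The verifier only accepts dereferences preceded by an explicit
	 * bounds check of a PTR_TO_PACKET value against PTR_TO_PACKET_END. */
	int cls_main(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;	/* PTR_TO_PACKET */
		void *data_end = (void *)(long)skb->data_end;	/* PTR_TO_PACKET_END */
		struct ethhdr *eth = data;

		if (data + sizeof(*eth) > data_end)
			return TC_ACT_OK;	/* would read past the packet */

		/* eth is now provably in bounds */
		return eth->h_proto == __constant_htons(ETH_P_IP) ?
		       TC_ACT_OK : TC_ACT_SHOT;
	}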
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 29dd8cc22bbf..510cd62fcb99 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2469,13 +2469,17 @@ int neigh_xmit(int index, struct net_device *dev,
 		tbl = neigh_tables[index];
 		if (!tbl)
 			goto out;
+		rcu_read_lock_bh();
 		neigh = __neigh_lookup_noref(tbl, addr, dev);
 		if (!neigh)
 			neigh = __neigh_create(tbl, addr, dev, false);
 		err = PTR_ERR(neigh);
-		if (IS_ERR(neigh))
+		if (IS_ERR(neigh)) {
+			rcu_read_unlock_bh();
 			goto out_kfree_skb;
+		}
 		err = neigh->output(neigh, skb);
+		rcu_read_unlock_bh();
 	}
 	else if (index == NEIGH_LINK_TABLE) {
 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
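The neigh_xmit() hunk closes an RCU hole: __neigh_lookup_noref() walks the neighbour hash table under RCU-bh protection without taking a reference, so the returned pointer is only safe while rcu_read_lock_bh() is held; previously both the lookup and the neigh->output() call ran unprotected. The general shape of the rule, as a sketch:

	rcu_read_lock_bh();			/* pin the RCU-bh read side */
	n = __neigh_lookup_noref(tbl, addr, dev);	/* no refcount taken */
	if (n)
		err = n->output(n, skb);	/* must stay inside the lock */
	rcu_read_unlock_bh();			/* after this, n may be freed */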
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 477937465a20..d95631d09248 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,11 @@ struct esp_skb_cb {
 	void *tmp;
 };

+struct esp_output_extra {
+	__be32 seqhi;
+	u32 esphoff;
+};
+
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
 {
 	unsigned int len;

-	len = seqhilen;
+	len = extralen;

 	len += crypto_aead_ivsize(aead);

@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
 	return kmalloc(len, GFP_ATOMIC);
 }

-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
 {
-	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
 }
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
 {
 	return crypto_aead_ivsize(aead) ?
-	       PTR_ALIGN((u8 *)tmp + seqhilen,
-			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+	       PTR_ALIGN((u8 *)tmp + extralen,
+			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
 }

 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 {
 	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
 	void *tmp = ESP_SKB_CB(skb)->tmp;
-	__be32 *seqhi = esp_tmp_seqhi(tmp);
+	__be32 *seqhi = esp_tmp_extra(tmp);

 	esph->seq_no = esph->spi;
 	esph->spi = *seqhi;
@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)

 static void esp_output_restore_header(struct sk_buff *skb)
 {
-	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+	void *tmp = ESP_SKB_CB(skb)->tmp;
+	struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+				sizeof(__be32));
 }

 static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
+	struct esp_output_extra *extra;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
 	struct aead_request *req;
@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	int tfclen;
 	int nfrags;
 	int assoclen;
-	int seqhilen;
-	__be32 *seqhi;
+	int extralen;
 	__be64 seqno;

 	/* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	nfrags = err;

 	assoclen = sizeof(*esph);
-	seqhilen = 0;
+	extralen = 0;

 	if (x->props.flags & XFRM_STATE_ESN) {
-		seqhilen += sizeof(__be32);
-		assoclen += seqhilen;
+		extralen += sizeof(*extra);
+		assoclen += sizeof(__be32);
 	}

-	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+	tmp = esp_alloc_tmp(aead, nfrags, extralen);
 	if (!tmp) {
 		err = -ENOMEM;
 		goto error;
 	}

-	seqhi = esp_tmp_seqhi(tmp);
-	iv = esp_tmp_iv(aead, tmp, seqhilen);
+	extra = esp_tmp_extra(tmp);
+	iv = esp_tmp_iv(aead, tmp, extralen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);

@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
-		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
-		*seqhi = esph->spi;
+		extra->esphoff = (unsigned char *)esph -
+				 skb_transport_header(skb);
+		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+		extra->seqhi = esph->spi;
 		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	}
@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		goto out;

 	ESP_SKB_CB(skb)->tmp = tmp;
-	seqhi = esp_tmp_seqhi(tmp);
+	seqhi = esp_tmp_extra(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
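What changed above: with extended sequence numbers (ESN), the output path used to assume the ESP header always sat directly at the transport header, which no longer holds in every case (e.g. when extra encapsulation shifts the header), so the restore step could patch the wrong bytes. The fix records both the high sequence word and the actual ESP header offset in an esp_output_extra placed at the front of the per-request scratch buffer. The layout that esp_alloc_tmp() and the esp_tmp_*() accessors hand out is, roughly (alignment padding omitted; a sketch derived from the hunks above, not a separate API):

	tmp = esp_alloc_tmp(aead, nfrags, extralen);
	extra = esp_tmp_extra(tmp);		/* front: { seqhi, esphoff } */
	iv    = esp_tmp_iv(aead, tmp, extralen);	/* IV, AEAD-aligned */
	req   = esp_tmp_req(aead, iv);		/* aead_request + context */
	sg    = esp_req_sg(aead, req);		/* trailing scatterlist[nfrags] */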
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index c4c3e439f424..b798862b6be5 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);

 /* Fills in tpi and returns header length to be pulled. */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-		     bool *csum_err, __be16 proto)
+		     bool *csum_err, __be16 proto, int nhs)
 {
 	const struct gre_base_hdr *greh;
 	__be32 *options;
 	int hdr_len;

-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+	if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
 		return -EINVAL;

-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	greh = (struct gre_base_hdr *)(skb->data + nhs);
 	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
 		return -EINVAL;

 	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
 	hdr_len = gre_calc_hlen(tpi->flags);

-	if (!pskb_may_pull(skb, hdr_len))
+	if (!pskb_may_pull(skb, nhs + hdr_len))
 		return -EINVAL;

-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	greh = (struct gre_base_hdr *)(skb->data + nhs);
 	tpi->proto = greh->protocol;

 	options = (__be32 *)(greh + 1);
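The new nhs argument exists because gre_parse_header() is not always called with skb->data at the GRE header: in the ICMP error path (gre_err(), next diff) skb->data still points at the outer IPv4 header, so the parser must skip the IP header first, while the normal receive paths pass 0. The two call shapes, taken from the hunks in this series:

	/* normal receive: GRE header is at skb->data */
	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);

	/* ICMP error handling: skb->data is the outer IP header */
	const struct iphdr *iph = (struct iphdr *)skb->data;

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
				   iph->ihl * 4);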
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8eec78f53f9e..5b1481be0282 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -49,12 +49,6 @@
 #include <net/gre.h>
 #include <net/dst_metadata.h>

-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
 /*
    Problems & solutions
    --------------------
@@ -226,12 +220,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
 	 * by themselves???
 	 */

+	const struct iphdr *iph = (struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;

-	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+			     iph->ihl * 4) < 0) {
 		if (!csum_err)		/* ignore csum errors. */
 			return;
 	}
@@ -347,7 +343,7 @@ static int gre_rcv(struct sk_buff *skb)
 	}
#endif

-	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
 	if (hdr_len < 0)
 		goto drop;

@@ -1154,6 +1150,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 {
 	struct nlattr *tb[IFLA_MAX + 1];
 	struct net_device *dev;
+	LIST_HEAD(list_kill);
 	struct ip_tunnel *t;
 	int err;

@@ -1169,8 +1166,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 	t->collect_md = true;

 	err = ipgre_newlink(net, dev, tb, NULL);
-	if (err < 0)
-		goto out;
+	if (err < 0) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}

 	/* openvswitch users expect packet sizes to be unrestricted,
 	 * so set the largest MTU we can.
@@ -1179,9 +1178,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 	if (err)
 		goto out;

+	err = rtnl_configure_link(dev, NULL);
+	if (err < 0)
+		goto out;
+
 	return dev;
 out:
-	free_netdev(dev);
+	ip_tunnel_dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
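The error-path rework matters because gretap_fb_dev_create() crosses the registration boundary: before ipgre_newlink() succeeds the device is unregistered and a plain free_netdev() is correct, but afterwards the netdev is live and must be torn down through ip_tunnel_dellink() plus unregister_netdevice_many(); freeing a registered netdev directly leaves stale state behind. Schematically (register_path() and later_setup() are invented placeholders for the real calls above):

	dev = alloc_netdev(...);
	err = register_path(dev);		/* e.g. ipgre_newlink() */
	if (err) {
		free_netdev(dev);		/* not yet registered: plain free */
		return ERR_PTR(err);
	}
	/* from here on the device is registered */
	err = later_setup(dev);			/* e.g. MTU / rtnl_configure_link() */
	if (err) {
		LIST_HEAD(list_kill);

		ip_tunnel_dellink(dev, &list_kill);	/* queue for unregister */
		unregister_netdevice_many(&list_kill);	/* freed later via destructor */
		return ERR_PTR(err);
	}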
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2ed9dd2b5f2f..1d71c40eaaf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE;		/* My IP address */
 static __be32 ic_netmask = NONE;	/* Netmask for local subnet */
 __be32 ic_gateway = NONE;	/* Gateway IP address */

-__be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE;	/* IP Address of the IP addresses'server */
+#endif

 __be32 ic_servaddr = NONE;	/* Boot server IP address */

diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 21a38e296fe2..5ad48ec77710 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

-	if (c)
+	if (c) {
+		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 		c->mfc_un.res.minvif = MAXVIFS;
+	}
 	return c;
 }

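Initialising last_assert to jiffies - MFC_ASSERT_THRESH - 1 backdates the timestamp so the very first assert for a fresh multicast cache entry is not suppressed by the rate limit; a zero-initialised field can make the jiffies comparison hold the first report back. A sketch of the check this feeds, assuming the usual time_after() rate-limit pattern used by the forwarding path:

	/* assumed consumer: report at most one assert per MFC_ASSERT_THRESH */
	if (time_after(jiffies, c->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
		c->mfc_un.res.last_assert = jiffies;
		/* ... send the wrong-interface / assert notification ... */
	}

With the backdated initial value, the condition is true the first time a new entry is tested, so the first event is reported immediately. The identical change is made for IPv6 in ip6mr.c below.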
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b1bcba0563f2..b26aa870adc0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2753,7 +2753,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	struct sk_buff *hole = NULL;
-	u32 last_lost;
+	u32 max_segs, last_lost;
 	int mib_idx;
 	int fwd_rexmitting = 0;

@@ -2773,6 +2773,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}

+	max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 		int segs;
@@ -2786,6 +2787,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
 		if (segs <= 0)
 			return;
+		/* In case tcp_shift_skb_data() have aggregated large skbs,
+		 * we need to make sure not sending too bigs TSO packets
+		 */
+		segs = min_t(int, segs, max_segs);

 		if (fwd_rexmitting) {
begin_fwd:
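The clamp mirrors what the normal transmit path already does: the cwnd budget (snd_cwnd - packets_in_flight) bounds how many segments may be sent now, while tcp_tso_autosize() bounds how many segments one TSO skb should carry given the pacing rate; retransmitting an skb that tcp_shift_skb_data() grew beyond that cap would emit oversized bursts. The resulting guard, extracted from the hunk above:

	segs = tp->snd_cwnd - tcp_packets_in_flight(tp);	/* cwnd budget */
	if (segs <= 0)
		return;
	segs = min_t(int, segs, max_segs);	/* also respect TSO autosizing */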
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ff31d97d485..ca5e8ea29538 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
 	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
 }

-static inline int compute_score(struct sock *sk, struct net *net,
-				__be32 saddr, unsigned short hnum, __be16 sport,
-				__be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+			 __be32 saddr, __be16 sport,
+			 __be32 daddr, unsigned short hnum, int dif)
 {
 	int score;
 	struct inet_sock *inet;
@@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
 	return score;
 }

-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
-				 __be32 saddr, __be16 sport,
-				 __be32 daddr, unsigned int hnum, int dif)
-{
-	int score;
-	struct inet_sock *inet;
-
-	if (!net_eq(sock_net(sk), net) ||
-	    ipv6_only_sock(sk))
-		return -1;
-
-	inet = inet_sk(sk);
-
-	if (inet->inet_rcv_saddr != daddr ||
-	    inet->inet_num != hnum)
-		return -1;
-
-	score = (sk->sk_family == PF_INET) ? 2 : 1;
-
-	if (inet->inet_daddr) {
-		if (inet->inet_daddr != saddr)
-			return -1;
-		score += 4;
-	}
-
-	if (inet->inet_dport) {
-		if (inet->inet_dport != sport)
-			return -1;
-		score += 4;
-	}
-
-	if (sk->sk_bound_dev_if) {
-		if (sk->sk_bound_dev_if != dif)
-			return -1;
-		score += 4;
-	}
-
-	if (sk->sk_incoming_cpu == raw_smp_processor_id())
-		score++;
-
-	return score;
-}
-
 static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
 		       const __u16 lport, const __be32 faddr,
 		       const __be16 fport)
@@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
 			      udp_ehash_secret + net_hash_mix(net));
 }

-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
 		__be32 saddr, __be16 sport,
 		__be32 daddr, unsigned int hnum, int dif,
-		struct udp_hslot *hslot2, unsigned int slot2,
+		struct udp_hslot *hslot2,
 		struct sk_buff *skb)
 {
 	struct sock *sk, *result;
@@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 	result = NULL;
 	badness = 0;
 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		score = compute_score2(sk, net, saddr, sport,
-				       daddr, hnum, dif);
+		score = compute_score(sk, net, saddr, sport,
+				      daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,

 		result = udp4_lib_lookup2(net, saddr, sport,
 					  daddr, hnum, dif,
-					  hslot2, slot2, skb);
+					  hslot2, skb);
 		if (!result) {
+			unsigned int old_slot2 = slot2;
 			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
+			/* avoid searching the same slot again. */
+			if (unlikely(slot2 == old_slot2))
+				return result;
+
 			hslot2 = &udptable->hash2[slot2];
 			if (hslot->count < hslot2->count)
 				goto begin;

 			result = udp4_lib_lookup2(net, saddr, sport,
-						  htonl(INADDR_ANY), hnum, dif,
-						  hslot2, slot2, skb);
+						  daddr, hnum, dif,
+						  hslot2, skb);
 		}
 		return result;
 	}
@@ -572,8 +531,8 @@ begin:
 	result = NULL;
 	badness = 0;
 	sk_for_each_rcu(sk, &hslot->head) {
-		score = compute_score(sk, net, saddr, hnum, sport,
-				      daddr, dport, dif);
+		score = compute_score(sk, net, saddr, sport,
+				      daddr, hnum, dif);
 		if (score > badness) {
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
@@ -1755,8 +1714,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
 		return err;
 	}

-	return skb_checksum_init_zero_check(skb, proto, uh->check,
-					    inet_compute_pseudo);
+	/* Note, we are only interested in != 0 or == 0, thus the
+	 * force to int.
+	 */
+	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+							 inet_compute_pseudo);
 }

 /*
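Both the IPv4 and (below) the IPv6 lookup paths now use a single compute_score() for the two-level hash instead of a separate compute_score2(). The shape of __udp4_lib_lookup() is worth spelling out: first probe the hash2 slot keyed on (daddr, hnum) for exact local-address matches, then fall back to the slot keyed on (INADDR_ANY, hnum) for wildcard-bound sockets; since both passes now score against the real daddr, the new old_slot2 check skips the second pass when both keys hash to the same slot. In outline (a sketch of the hunk above; the hslot-count fallback to the slow path is omitted):

	/* pass 1: exact (daddr, port) slot */
	slot2  = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
	result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif,
				  &udptable->hash2[slot2], skb);
	if (!result) {
		unsigned int old = slot2;

		/* pass 2: wildcard (INADDR_ANY, port) slot */
		slot2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			udptable->mask;
		if (slot2 != old)	/* same slot: nothing new to find */
			result = udp4_lib_lookup2(net, saddr, sport, daddr,
						  hnum, dif,
						  &udptable->hash2[slot2], skb);
	}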
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fd11f5856ce8..bd59c343d35f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,

 	if (!(type & ICMPV6_INFOMSG_MASK))
 		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-			ping_err(skb, offset, info);
+			ping_err(skb, offset, ntohl(info));
 }

 static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index b2025bf3da4a..c0cbcb259f5a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -78,9 +78,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
 	 * we accept a checksum of zero here. When we find the socket
 	 * for the UDP packet we'll check if that socket allows zero checksum
 	 * for IPv6 (set by socket option).
+	 *
+	 * Note, we are only interested in != 0 or == 0, thus the
+	 * force to int.
 	 */
-	return skb_checksum_init_zero_check(skb, proto, uh->check,
-					    ip6_compute_pseudo);
+	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+							 ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);

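The (__force int) casts here and in udp4_csum_init() above are about sparse, not runtime behaviour: skb_checksum_init_zero_check() yields a __sum16, a bitwise type that sparse refuses to mix silently with plain integers, but these callers only test the result against zero, so forcing the conversion is safe. The same idiom in isolation (foo_checksum() is a made-up stand-in):

	__sum16 csum = foo_checksum(skb);	/* bitwise type, sparse-checked */

	/* only the zero/non-zero distinction matters, so the cast is safe */
	return (__force int)csum;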
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index fdc9de276ab1..776d145113e1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb)
 	bool csum_err = false;
 	int hdr_len;

-	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
 	if (hdr_len < 0)
 		goto drop;

diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f2e2013f8346..487ef3bc7bbc 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
 	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (!c)
 		return NULL;
+	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
 	c->mfc_un.res.minvif = MAXMIFS;
 	return c;
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 08b77f421268..49817555449e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1783,7 +1783,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
 	};
 	struct fib6_table *table;
 	struct rt6_info *rt;
-	int flags = 0;
+	int flags = RT6_LOOKUP_F_IFACE;

 	table = fib6_get_table(net, cfg->fc_table);
 	if (!table)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index cdd714690f95..917a5cd4b8fc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -526,13 +526,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)

 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				 t->parms.link, 0, IPPROTO_IPV6, 0);
+				 t->parms.link, 0, iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
 	if (type == ICMP_REDIRECT) {
 		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-			      IPPROTO_IPV6, 0);
+			      iph->protocol, 0);
 		err = 0;
 		goto out;
 	}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f36c2d076fce..2255d2bf5f6b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -738,7 +738,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
-				 u8 tclass, u32 label)
+				 u8 tclass, __be32 label)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
@@ -911,7 +911,7 @@ out:
 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
-			    u32 label)
+			    __be32 label)
 {
 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 			     tclass, label);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4bb5c13777f1..0a71a312d0d8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -115,11 +115,10 @@ static void udp_v6_rehash(struct sock *sk) | |||
115 | udp_lib_rehash(sk, new_hash); | 115 | udp_lib_rehash(sk, new_hash); |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline int compute_score(struct sock *sk, struct net *net, | 118 | static int compute_score(struct sock *sk, struct net *net, |
119 | unsigned short hnum, | 119 | const struct in6_addr *saddr, __be16 sport, |
120 | const struct in6_addr *saddr, __be16 sport, | 120 | const struct in6_addr *daddr, unsigned short hnum, |
121 | const struct in6_addr *daddr, __be16 dport, | 121 | int dif) |
122 | int dif) | ||
123 | { | 122 | { |
124 | int score; | 123 | int score; |
125 | struct inet_sock *inet; | 124 | struct inet_sock *inet; |
@@ -162,54 +161,11 @@ static inline int compute_score(struct sock *sk, struct net *net, | |||
162 | return score; | 161 | return score; |
163 | } | 162 | } |
164 | 163 | ||
165 | static inline int compute_score2(struct sock *sk, struct net *net, | 164 | /* called with rcu_read_lock() */ |
166 | const struct in6_addr *saddr, __be16 sport, | ||
167 | const struct in6_addr *daddr, | ||
168 | unsigned short hnum, int dif) | ||
169 | { | ||
170 | int score; | ||
171 | struct inet_sock *inet; | ||
172 | |||
173 | if (!net_eq(sock_net(sk), net) || | ||
174 | udp_sk(sk)->udp_port_hash != hnum || | ||
175 | sk->sk_family != PF_INET6) | ||
176 | return -1; | ||
177 | |||
178 | if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) | ||
179 | return -1; | ||
180 | |||
181 | score = 0; | ||
182 | inet = inet_sk(sk); | ||
183 | |||
184 | if (inet->inet_dport) { | ||
185 | if (inet->inet_dport != sport) | ||
186 | return -1; | ||
187 | score++; | ||
188 | } | ||
189 | |||
190 | if (!ipv6_addr_any(&sk->sk_v6_daddr)) { | ||
191 | if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) | ||
192 | return -1; | ||
193 | score++; | ||
194 | } | ||
195 | |||
196 | if (sk->sk_bound_dev_if) { | ||
197 | if (sk->sk_bound_dev_if != dif) | ||
198 | return -1; | ||
199 | score++; | ||
200 | } | ||
201 | |||
202 | if (sk->sk_incoming_cpu == raw_smp_processor_id()) | ||
203 | score++; | ||
204 | |||
205 | return score; | ||
206 | } | ||
207 | |||
208 | /* called with read_rcu_lock() */ | ||
209 | static struct sock *udp6_lib_lookup2(struct net *net, | 165 | static struct sock *udp6_lib_lookup2(struct net *net, |
210 | const struct in6_addr *saddr, __be16 sport, | 166 | const struct in6_addr *saddr, __be16 sport, |
211 | const struct in6_addr *daddr, unsigned int hnum, int dif, | 167 | const struct in6_addr *daddr, unsigned int hnum, int dif, |
212 | struct udp_hslot *hslot2, unsigned int slot2, | 168 | struct udp_hslot *hslot2, |
213 | struct sk_buff *skb) | 169 | struct sk_buff *skb) |
214 | { | 170 | { |
215 | struct sock *sk, *result; | 171 | struct sock *sk, *result; |
@@ -219,7 +175,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, | |||
219 | result = NULL; | 175 | result = NULL; |
220 | badness = -1; | 176 | badness = -1; |
221 | udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { | 177 | udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { |
222 | score = compute_score2(sk, net, saddr, sport, | 178 | score = compute_score(sk, net, saddr, sport, |
223 | daddr, hnum, dif); | 179 | daddr, hnum, dif); |
224 | if (score > badness) { | 180 | if (score > badness) { |
225 | reuseport = sk->sk_reuseport; | 181 | reuseport = sk->sk_reuseport; |
@@ -268,17 +224,22 @@ struct sock *__udp6_lib_lookup(struct net *net, | |||
268 | 224 | ||
269 | result = udp6_lib_lookup2(net, saddr, sport, | 225 | result = udp6_lib_lookup2(net, saddr, sport, |
270 | daddr, hnum, dif, | 226 | daddr, hnum, dif, |
271 | hslot2, slot2, skb); | 227 | hslot2, skb); |
272 | if (!result) { | 228 | if (!result) { |
229 | unsigned int old_slot2 = slot2; | ||
273 | hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); | 230 | hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); |
274 | slot2 = hash2 & udptable->mask; | 231 | slot2 = hash2 & udptable->mask; |
232 | /* avoid searching the same slot again. */ | ||
233 | if (unlikely(slot2 == old_slot2)) | ||
234 | return result; | ||
235 | |||
275 | hslot2 = &udptable->hash2[slot2]; | 236 | hslot2 = &udptable->hash2[slot2]; |
276 | if (hslot->count < hslot2->count) | 237 | if (hslot->count < hslot2->count) |
277 | goto begin; | 238 | goto begin; |
278 | 239 | ||
279 | result = udp6_lib_lookup2(net, saddr, sport, | 240 | result = udp6_lib_lookup2(net, saddr, sport, |
280 | &in6addr_any, hnum, dif, | 241 | daddr, hnum, dif, |
281 | hslot2, slot2, skb); | 242 | hslot2, skb); |
282 | } | 243 | } |
283 | return result; | 244 | return result; |
284 | } | 245 | } |
@@ -286,7 +247,7 @@ begin: | |||
286 | result = NULL; | 247 | result = NULL; |
287 | badness = -1; | 248 | badness = -1; |
288 | sk_for_each_rcu(sk, &hslot->head) { | 249 | sk_for_each_rcu(sk, &hslot->head) { |
289 | score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); | 250 | score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); |
290 | if (score > badness) { | 251 | if (score > badness) { |
291 | reuseport = sk->sk_reuseport; | 252 | reuseport = sk->sk_reuseport; |
292 | if (reuseport) { | 253 | if (reuseport) { |
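
The udp.c hunks above fold compute_score2() into a single compute_score() (daddr is now always matched exactly, and hnum replaces dport in the signature) and teach the second-pass lookup to skip a wildcard slot it has already scanned. A minimal sketch of the fallback path after the change, assuming the surrounding __udp6_lib_lookup() context:

    /* First pass: the exact-daddr hash slot. */
    result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif,
                              hslot2, skb);
    if (!result) {
            unsigned int old_slot2 = slot2;

            /* Second pass: the in6addr_any (wildcard) hash slot. */
            hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
            slot2 = hash2 & udptable->mask;
            /* Both hashes can land in one slot; don't walk it twice. */
            if (unlikely(slot2 == old_slot2))
                    return result;

            hslot2 = &udptable->hash2[slot2];
            result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif,
                                      hslot2, skb);
    }
    return result;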
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c index 738008726cc6..fda7f4715c58 100644 --- a/net/kcm/kcmproc.c +++ b/net/kcm/kcmproc.c | |||
@@ -241,6 +241,7 @@ static const struct file_operations kcm_seq_fops = { | |||
241 | .open = kcm_seq_open, | 241 | .open = kcm_seq_open, |
242 | .read = seq_read, | 242 | .read = seq_read, |
243 | .llseek = seq_lseek, | 243 | .llseek = seq_lseek, |
244 | .release = seq_release_net, | ||
244 | }; | 245 | }; |
245 | 246 | ||
246 | static struct kcm_seq_muxinfo kcm_seq_muxinfo = { | 247 | static struct kcm_seq_muxinfo kcm_seq_muxinfo = { |
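
The kcm proc open path presumably pins the network namespace (seq_open_net() style), so every close() needs the matching seq_release_net() or the namespace refcount leaks. The completed table, as the hunk leaves it:

    static const struct file_operations kcm_seq_fops = {
            .open    = kcm_seq_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_net,  /* drops the ref taken at open */
    };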
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 21b1fdf5d01d..6a1603bcdced 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -148,14 +148,17 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | |||
148 | void mesh_sta_cleanup(struct sta_info *sta) | 148 | void mesh_sta_cleanup(struct sta_info *sta) |
149 | { | 149 | { |
150 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 150 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
151 | u32 changed; | 151 | u32 changed = 0; |
152 | 152 | ||
153 | /* | 153 | /* |
154 | * maybe userspace handles peer allocation and peering, but in either | 154 | * maybe userspace handles peer allocation and peering, but in either |
155 | * case the beacon is still generated by the kernel and we might need | 155 | * case the beacon is still generated by the kernel and we might need |
156 | * an update. | 156 | * an update. |
157 | */ | 157 | */ |
158 | changed = mesh_accept_plinks_update(sdata); | 158 | if (sdata->u.mesh.user_mpm && |
159 | sta->mesh->plink_state == NL80211_PLINK_ESTAB) | ||
160 | changed |= mesh_plink_dec_estab_count(sdata); | ||
161 | changed |= mesh_accept_plinks_update(sdata); | ||
159 | if (!sdata->u.mesh.user_mpm) { | 162 | if (!sdata->u.mesh.user_mpm) { |
160 | changed |= mesh_plink_deactivate(sta); | 163 | changed |= mesh_plink_deactivate(sta); |
161 | del_timer_sync(&sta->mesh->plink_timer); | 164 | del_timer_sync(&sta->mesh->plink_timer); |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index db2312eeb2a4..f204274a9b6b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1544,6 +1544,8 @@ void nf_conntrack_cleanup_end(void) | |||
1544 | nf_conntrack_tstamp_fini(); | 1544 | nf_conntrack_tstamp_fini(); |
1545 | nf_conntrack_acct_fini(); | 1545 | nf_conntrack_acct_fini(); |
1546 | nf_conntrack_expect_fini(); | 1546 | nf_conntrack_expect_fini(); |
1547 | |||
1548 | kmem_cache_destroy(nf_conntrack_cachep); | ||
1547 | } | 1549 | } |
1548 | 1550 | ||
1549 | /* | 1551 | /* |
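
nf_conntrack_cachep is the slab cache the conntrack core allocates entries from; nf_conntrack_cleanup_end() previously tore everything else down but left the cache behind. The usual create/destroy symmetry, sketched with hypothetical init/fini wrappers rather than the real call sites:

    /* Illustration only; the real kmem_cache_create() call is elsewhere
     * in nf_conntrack_core.c. */
    static struct kmem_cache *nf_conntrack_cachep __read_mostly;

    static int sketch_init(void)
    {
            nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                    sizeof(struct nf_conn),
                                                    0, SLAB_HWCACHE_ALIGN,
                                                    NULL);
            return nf_conntrack_cachep ? 0 : -ENOMEM;
    }

    static void sketch_fini(void)
    {
            kmem_cache_destroy(nf_conntrack_cachep);   /* the added line */
    }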
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7b7aa871a174..2c881871db38 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -2946,24 +2946,20 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | |||
2946 | * jumps are already validated for that chain. | 2946 | * jumps are already validated for that chain. |
2947 | */ | 2947 | */ |
2948 | list_for_each_entry(i, &set->bindings, list) { | 2948 | list_for_each_entry(i, &set->bindings, list) { |
2949 | if (binding->flags & NFT_SET_MAP && | 2949 | if (i->flags & NFT_SET_MAP && |
2950 | i->chain == binding->chain) | 2950 | i->chain == binding->chain) |
2951 | goto bind; | 2951 | goto bind; |
2952 | } | 2952 | } |
2953 | 2953 | ||
2954 | iter.genmask = nft_genmask_next(ctx->net); | ||
2954 | iter.skip = 0; | 2955 | iter.skip = 0; |
2955 | iter.count = 0; | 2956 | iter.count = 0; |
2956 | iter.err = 0; | 2957 | iter.err = 0; |
2957 | iter.fn = nf_tables_bind_check_setelem; | 2958 | iter.fn = nf_tables_bind_check_setelem; |
2958 | 2959 | ||
2959 | set->ops->walk(ctx, set, &iter); | 2960 | set->ops->walk(ctx, set, &iter); |
2960 | if (iter.err < 0) { | 2961 | if (iter.err < 0) |
2961 | /* Destroy anonymous sets if binding fails */ | ||
2962 | if (set->flags & NFT_SET_ANONYMOUS) | ||
2963 | nf_tables_set_destroy(ctx, set); | ||
2964 | |||
2965 | return iter.err; | 2962 | return iter.err; |
2966 | } | ||
2967 | } | 2963 | } |
2968 | bind: | 2964 | bind: |
2969 | binding->chain = ctx->chain; | 2965 | binding->chain = ctx->chain; |
@@ -3192,12 +3188,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
3192 | if (nest == NULL) | 3188 | if (nest == NULL) |
3193 | goto nla_put_failure; | 3189 | goto nla_put_failure; |
3194 | 3190 | ||
3195 | args.cb = cb; | 3191 | args.cb = cb; |
3196 | args.skb = skb; | 3192 | args.skb = skb; |
3197 | args.iter.skip = cb->args[0]; | 3193 | args.iter.genmask = nft_genmask_cur(ctx.net); |
3198 | args.iter.count = 0; | 3194 | args.iter.skip = cb->args[0]; |
3199 | args.iter.err = 0; | 3195 | args.iter.count = 0; |
3200 | args.iter.fn = nf_tables_dump_setelem; | 3196 | args.iter.err = 0; |
3197 | args.iter.fn = nf_tables_dump_setelem; | ||
3201 | set->ops->walk(&ctx, set, &args.iter); | 3198 | set->ops->walk(&ctx, set, &args.iter); |
3202 | 3199 | ||
3203 | nla_nest_end(skb, nest); | 3200 | nla_nest_end(skb, nest); |
@@ -4284,6 +4281,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
4284 | binding->chain != chain) | 4281 | binding->chain != chain) |
4285 | continue; | 4282 | continue; |
4286 | 4283 | ||
4284 | iter.genmask = nft_genmask_next(ctx->net); | ||
4287 | iter.skip = 0; | 4285 | iter.skip = 0; |
4288 | iter.count = 0; | 4286 | iter.count = 0; |
4289 | iter.err = 0; | 4287 | iter.err = 0; |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index e9f8dffcc244..fb8b5892b5ff 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
@@ -143,7 +143,7 @@ next_rule: | |||
143 | list_for_each_entry_continue_rcu(rule, &chain->rules, list) { | 143 | list_for_each_entry_continue_rcu(rule, &chain->rules, list) { |
144 | 144 | ||
145 | /* This rule is not active, skip. */ | 145 | /* This rule is not active, skip. */ |
146 | if (unlikely(rule->genmask & (1 << gencursor))) | 146 | if (unlikely(rule->genmask & gencursor)) |
147 | continue; | 147 | continue; |
148 | 148 | ||
149 | rulenum++; | 149 | rulenum++; |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 6fa016564f90..f39c53a159eb 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -189,7 +189,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
189 | struct nft_hash_elem *he; | 189 | struct nft_hash_elem *he; |
190 | struct rhashtable_iter hti; | 190 | struct rhashtable_iter hti; |
191 | struct nft_set_elem elem; | 191 | struct nft_set_elem elem; |
192 | u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); | ||
193 | int err; | 192 | int err; |
194 | 193 | ||
195 | err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); | 194 | err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); |
@@ -218,7 +217,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, | |||
218 | goto cont; | 217 | goto cont; |
219 | if (nft_set_elem_expired(&he->ext)) | 218 | if (nft_set_elem_expired(&he->ext)) |
220 | goto cont; | 219 | goto cont; |
221 | if (!nft_set_elem_active(&he->ext, genmask)) | 220 | if (!nft_set_elem_active(&he->ext, iter->genmask)) |
222 | goto cont; | 221 | goto cont; |
223 | 222 | ||
224 | elem.priv = he; | 223 | elem.priv = he; |
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index f762094af7c1..7201d57b5a93 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c | |||
@@ -211,7 +211,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, | |||
211 | struct nft_rbtree_elem *rbe; | 211 | struct nft_rbtree_elem *rbe; |
212 | struct nft_set_elem elem; | 212 | struct nft_set_elem elem; |
213 | struct rb_node *node; | 213 | struct rb_node *node; |
214 | u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); | ||
215 | 214 | ||
216 | spin_lock_bh(&nft_rbtree_lock); | 215 | spin_lock_bh(&nft_rbtree_lock); |
217 | for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { | 216 | for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { |
@@ -219,7 +218,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, | |||
219 | 218 | ||
220 | if (iter->count < iter->skip) | 219 | if (iter->count < iter->skip) |
221 | goto cont; | 220 | goto cont; |
222 | if (!nft_set_elem_active(&rbe->ext, genmask)) | 221 | if (!nft_set_elem_active(&rbe->ext, iter->genmask)) |
223 | goto cont; | 222 | goto cont; |
224 | 223 | ||
225 | elem.priv = rbe; | 224 | elem.priv = rbe; |
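
On the walker side (nft_hash and nft_rbtree above) the per-backend genmask lookup disappears and the element filter simply honours whatever view the caller requested:

    /* Skip elements not active in the caller-chosen generation. */
    if (!nft_set_elem_active(&he->ext, iter->genmask))
            goto cont;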
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 52f3b9b89e97..b4069a90e375 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -818,8 +818,18 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key, | |||
818 | */ | 818 | */ |
819 | state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; | 819 | state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; |
820 | __ovs_ct_update_key(key, state, &info->zone, exp->master); | 820 | __ovs_ct_update_key(key, state, &info->zone, exp->master); |
821 | } else | 821 | } else { |
822 | return __ovs_ct_lookup(net, key, info, skb); | 822 | struct nf_conn *ct; |
823 | int err; | ||
824 | |||
825 | err = __ovs_ct_lookup(net, key, info, skb); | ||
826 | if (err) | ||
827 | return err; | ||
828 | |||
829 | ct = (struct nf_conn *)skb->nfct; | ||
830 | if (ct) | ||
831 | nf_ct_deliver_cached_events(ct); | ||
832 | } | ||
823 | 833 | ||
824 | return 0; | 834 | return 0; |
825 | } | 835 | } |
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 334287602b78..e48bb1ba3dfc 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -112,7 +112,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | if (conn->c_version < RDS_PROTOCOL(3,1)) { | 115 | if (conn->c_version < RDS_PROTOCOL(3, 1)) { |
116 | printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," | 116 | printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," |
117 | " no longer supported\n", | 117 | " no longer supported\n", |
118 | &conn->c_faddr, | 118 | &conn->c_faddr, |
diff --git a/net/rds/loop.c b/net/rds/loop.c index 268f07faaa1a..15f83db78f0c 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -96,8 +96,9 @@ out: | |||
96 | */ | 96 | */ |
97 | static void rds_loop_inc_free(struct rds_incoming *inc) | 97 | static void rds_loop_inc_free(struct rds_incoming *inc) |
98 | { | 98 | { |
99 | struct rds_message *rm = container_of(inc, struct rds_message, m_inc); | 99 | struct rds_message *rm = container_of(inc, struct rds_message, m_inc); |
100 | rds_message_put(rm); | 100 | |
101 | rds_message_put(rm); | ||
101 | } | 102 | } |
102 | 103 | ||
103 | /* we need to at least give the thread something to succeed */ | 104 | /* we need to at least give the thread something to succeed */ |
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index c173f69e1479..e381bbcd9cc1 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c | |||
@@ -102,7 +102,8 @@ int rds_sysctl_init(void) | |||
102 | rds_sysctl_reconnect_min = msecs_to_jiffies(1); | 102 | rds_sysctl_reconnect_min = msecs_to_jiffies(1); |
103 | rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; | 103 | rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; |
104 | 104 | ||
105 | rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table); | 105 | rds_sysctl_reg_table = |
106 | register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table); | ||
106 | if (!rds_sysctl_reg_table) | 107 | if (!rds_sysctl_reg_table) |
107 | return -ENOMEM; | 108 | return -ENOMEM; |
108 | return 0; | 109 | return 0; |
diff --git a/net/rds/tcp.h b/net/rds/tcp.h index ec0602b0dc24..7940babf6c71 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h | |||
@@ -83,7 +83,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); | |||
83 | void rds_tcp_xmit_prepare(struct rds_connection *conn); | 83 | void rds_tcp_xmit_prepare(struct rds_connection *conn); |
84 | void rds_tcp_xmit_complete(struct rds_connection *conn); | 84 | void rds_tcp_xmit_complete(struct rds_connection *conn); |
85 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, | 85 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, |
86 | unsigned int hdr_off, unsigned int sg, unsigned int off); | 86 | unsigned int hdr_off, unsigned int sg, unsigned int off); |
87 | void rds_tcp_write_space(struct sock *sk); | 87 | void rds_tcp_write_space(struct sock *sk); |
88 | 88 | ||
89 | /* tcp_stats.c */ | 89 | /* tcp_stats.c */ |
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index ba9ec67f4e41..96c2c4d17909 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
@@ -55,20 +55,20 @@ void rds_tcp_state_change(struct sock *sk) | |||
55 | 55 | ||
56 | rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); | 56 | rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); |
57 | 57 | ||
58 | switch(sk->sk_state) { | 58 | switch (sk->sk_state) { |
59 | /* ignore connecting sockets as they make progress */ | 59 | /* ignore connecting sockets as they make progress */ |
60 | case TCP_SYN_SENT: | 60 | case TCP_SYN_SENT: |
61 | case TCP_SYN_RECV: | 61 | case TCP_SYN_RECV: |
62 | break; | 62 | break; |
63 | case TCP_ESTABLISHED: | 63 | case TCP_ESTABLISHED: |
64 | rds_connect_path_complete(&conn->c_path[0], | 64 | rds_connect_path_complete(&conn->c_path[0], |
65 | RDS_CONN_CONNECTING); | 65 | RDS_CONN_CONNECTING); |
66 | break; | 66 | break; |
67 | case TCP_CLOSE_WAIT: | 67 | case TCP_CLOSE_WAIT: |
68 | case TCP_CLOSE: | 68 | case TCP_CLOSE: |
69 | rds_conn_drop(conn); | 69 | rds_conn_drop(conn); |
70 | default: | 70 | default: |
71 | break; | 71 | break; |
72 | } | 72 | } |
73 | out: | 73 | out: |
74 | read_unlock_bh(&sk->sk_callback_lock); | 74 | read_unlock_bh(&sk->sk_callback_lock); |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 22d9bb15f731..f9cc945a77b3 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -140,7 +140,7 @@ int rds_tcp_accept_one(struct socket *sock) | |||
140 | conn->c_path[0].cp_outgoing = 0; | 140 | conn->c_path[0].cp_outgoing = 0; |
141 | /* rds_connect_path_complete() marks RDS_CONN_UP */ | 141 | /* rds_connect_path_complete() marks RDS_CONN_UP */ |
142 | rds_connect_path_complete(&conn->c_path[0], | 142 | rds_connect_path_complete(&conn->c_path[0], |
143 | RDS_CONN_DISCONNECTING); | 143 | RDS_CONN_RESETTING); |
144 | } | 144 | } |
145 | } else { | 145 | } else { |
146 | rds_tcp_set_callbacks(new_sock, conn); | 146 | rds_tcp_set_callbacks(new_sock, conn); |
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 3f8fb38996c7..4a87d9ef3084 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c | |||
@@ -172,7 +172,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, | |||
172 | while (left) { | 172 | while (left) { |
173 | if (!tinc) { | 173 | if (!tinc) { |
174 | tinc = kmem_cache_alloc(rds_tcp_incoming_slab, | 174 | tinc = kmem_cache_alloc(rds_tcp_incoming_slab, |
175 | arg->gfp); | 175 | arg->gfp); |
176 | if (!tinc) { | 176 | if (!tinc) { |
177 | desc->error = -ENOMEM; | 177 | desc->error = -ENOMEM; |
178 | goto out; | 178 | goto out; |
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 2b3414f3c45c..710f1aae97ad 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c | |||
@@ -67,19 +67,19 @@ void rds_tcp_xmit_complete(struct rds_connection *conn) | |||
67 | static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) | 67 | static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) |
68 | { | 68 | { |
69 | struct kvec vec = { | 69 | struct kvec vec = { |
70 | .iov_base = data, | 70 | .iov_base = data, |
71 | .iov_len = len, | 71 | .iov_len = len, |
72 | }; | ||
73 | struct msghdr msg = { | ||
74 | .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, | ||
72 | }; | 75 | }; |
73 | struct msghdr msg = { | ||
74 | .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, | ||
75 | }; | ||
76 | 76 | ||
77 | return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); | 77 | return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); |
78 | } | 78 | } |
79 | 79 | ||
80 | /* the core send_sem serializes this with other xmit and shutdown */ | 80 | /* the core send_sem serializes this with other xmit and shutdown */ |
81 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, | 81 | int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, |
82 | unsigned int hdr_off, unsigned int sg, unsigned int off) | 82 | unsigned int hdr_off, unsigned int sg, unsigned int off) |
83 | { | 83 | { |
84 | struct rds_tcp_connection *tc = conn->c_transport_data; | 84 | struct rds_tcp_connection *tc = conn->c_transport_data; |
85 | int done = 0; | 85 | int done = 0; |
@@ -197,7 +197,7 @@ void rds_tcp_write_space(struct sock *sk) | |||
197 | tc->t_last_seen_una = rds_tcp_snd_una(tc); | 197 | tc->t_last_seen_una = rds_tcp_snd_una(tc); |
198 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); | 198 | rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); |
199 | 199 | ||
200 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) | 200 | if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) |
201 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 201 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
202 | 202 | ||
203 | out: | 203 | out: |
diff --git a/net/rds/transport.c b/net/rds/transport.c index f3afd1d60d3c..2ffd3e30c643 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c | |||
@@ -140,8 +140,7 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, | |||
140 | rds_info_iter_unmap(iter); | 140 | rds_info_iter_unmap(iter); |
141 | down_read(&rds_trans_sem); | 141 | down_read(&rds_trans_sem); |
142 | 142 | ||
143 | for (i = 0; i < RDS_TRANS_COUNT; i++) | 143 | for (i = 0; i < RDS_TRANS_COUNT; i++) { |
144 | { | ||
145 | trans = transports[i]; | 144 | trans = transports[i]; |
146 | if (!trans || !trans->stats_info_copy) | 145 | if (!trans || !trans->stats_info_copy) |
147 | continue; | 146 | continue; |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f8c61d2a7963..47ec2305f920 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -1122,7 +1122,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) | |||
1122 | nla_nest_end(skb, nest); | 1122 | nla_nest_end(skb, nest); |
1123 | ret = skb->len; | 1123 | ret = skb->len; |
1124 | } else | 1124 | } else |
1125 | nla_nest_cancel(skb, nest); | 1125 | nlmsg_trim(skb, b); |
1126 | 1126 | ||
1127 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; | 1127 | nlh->nlmsg_len = skb_tail_pointer(skb) - b; |
1128 | if (NETLINK_CB(cb->skb).portid && ret) | 1128 | if (NETLINK_CB(cb->skb).portid && ret) |
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index b7fa96926c90..845ab5119c05 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c | |||
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi) | |||
106 | } | 106 | } |
107 | EXPORT_SYMBOL_GPL(ife_get_meta_u16); | 107 | EXPORT_SYMBOL_GPL(ife_get_meta_u16); |
108 | 108 | ||
109 | int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) | 109 | int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) |
110 | { | 110 | { |
111 | mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL); | 111 | mi->metaval = kmemdup(metaval, sizeof(u32), gfp); |
112 | if (!mi->metaval) | 112 | if (!mi->metaval) |
113 | return -ENOMEM; | 113 | return -ENOMEM; |
114 | 114 | ||
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) | |||
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(ife_alloc_meta_u32); | 117 | EXPORT_SYMBOL_GPL(ife_alloc_meta_u32); |
118 | 118 | ||
119 | int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval) | 119 | int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) |
120 | { | 120 | { |
121 | mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL); | 121 | mi->metaval = kmemdup(metaval, sizeof(u16), gfp); |
122 | if (!mi->metaval) | 122 | if (!mi->metaval) |
123 | return -ENOMEM; | 123 | return -ENOMEM; |
124 | 124 | ||
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len) | |||
240 | } | 240 | } |
241 | 241 | ||
242 | /* called when adding new meta information | 242 | /* called when adding new meta information |
243 | * under ife->tcf_lock | 243 | * under ife->tcf_lock for existing action |
244 | */ | 244 | */ |
245 | static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | 245 | static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, |
246 | void *val, int len) | 246 | void *val, int len, bool exists) |
247 | { | 247 | { |
248 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | 248 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); |
249 | int ret = 0; | 249 | int ret = 0; |
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
251 | if (!ops) { | 251 | if (!ops) { |
252 | ret = -ENOENT; | 252 | ret = -ENOENT; |
253 | #ifdef CONFIG_MODULES | 253 | #ifdef CONFIG_MODULES |
254 | spin_unlock_bh(&ife->tcf_lock); | 254 | if (exists) |
255 | spin_unlock_bh(&ife->tcf_lock); | ||
255 | rtnl_unlock(); | 256 | rtnl_unlock(); |
256 | request_module("ifemeta%u", metaid); | 257 | request_module("ifemeta%u", metaid); |
257 | rtnl_lock(); | 258 | rtnl_lock(); |
258 | spin_lock_bh(&ife->tcf_lock); | 259 | if (exists) |
260 | spin_lock_bh(&ife->tcf_lock); | ||
259 | ops = find_ife_oplist(metaid); | 261 | ops = find_ife_oplist(metaid); |
260 | #endif | 262 | #endif |
261 | } | 263 | } |
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, | |||
272 | } | 274 | } |
273 | 275 | ||
274 | /* called when adding new meta information | 276 | /* called when adding new meta information |
275 | * under ife->tcf_lock | 277 | * under ife->tcf_lock for existing action |
276 | */ | 278 | */ |
277 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | 279 | static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, |
278 | int len) | 280 | int len, bool atomic) |
279 | { | 281 | { |
280 | struct tcf_meta_info *mi = NULL; | 282 | struct tcf_meta_info *mi = NULL; |
281 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); | 283 | struct tcf_meta_ops *ops = find_ife_oplist(metaid); |
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | |||
284 | if (!ops) | 286 | if (!ops) |
285 | return -ENOENT; | 287 | return -ENOENT; |
286 | 288 | ||
287 | mi = kzalloc(sizeof(*mi), GFP_KERNEL); | 289 | mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); |
288 | if (!mi) { | 290 | if (!mi) { |
289 | /*put back what find_ife_oplist took */ | 291 | /*put back what find_ife_oplist took */ |
290 | module_put(ops->owner); | 292 | module_put(ops->owner); |
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, | |||
294 | mi->metaid = metaid; | 296 | mi->metaid = metaid; |
295 | mi->ops = ops; | 297 | mi->ops = ops; |
296 | if (len > 0) { | 298 | if (len > 0) { |
297 | ret = ops->alloc(mi, metaval); | 299 | ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); |
298 | if (ret != 0) { | 300 | if (ret != 0) { |
299 | kfree(mi); | 301 | kfree(mi); |
300 | module_put(ops->owner); | 302 | module_put(ops->owner); |
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife) | |||
313 | int rc = 0; | 315 | int rc = 0; |
314 | int installed = 0; | 316 | int installed = 0; |
315 | 317 | ||
318 | read_lock(&ife_mod_lock); | ||
316 | list_for_each_entry(o, &ifeoplist, list) { | 319 | list_for_each_entry(o, &ifeoplist, list) { |
317 | rc = add_metainfo(ife, o->metaid, NULL, 0); | 320 | rc = add_metainfo(ife, o->metaid, NULL, 0, true); |
318 | if (rc == 0) | 321 | if (rc == 0) |
319 | installed += 1; | 322 | installed += 1; |
320 | } | 323 | } |
324 | read_unlock(&ife_mod_lock); | ||
321 | 325 | ||
322 | if (installed) | 326 | if (installed) |
323 | return 0; | 327 | return 0; |
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind) | |||
385 | spin_unlock_bh(&ife->tcf_lock); | 389 | spin_unlock_bh(&ife->tcf_lock); |
386 | } | 390 | } |
387 | 391 | ||
388 | /* under ife->tcf_lock */ | 392 | /* under ife->tcf_lock for existing action */ |
389 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) | 393 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
394 | bool exists) | ||
390 | { | 395 | { |
391 | int len = 0; | 396 | int len = 0; |
392 | int rc = 0; | 397 | int rc = 0; |
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) | |||
398 | val = nla_data(tb[i]); | 403 | val = nla_data(tb[i]); |
399 | len = nla_len(tb[i]); | 404 | len = nla_len(tb[i]); |
400 | 405 | ||
401 | rc = load_metaops_and_vet(ife, i, val, len); | 406 | rc = load_metaops_and_vet(ife, i, val, len, exists); |
402 | if (rc != 0) | 407 | if (rc != 0) |
403 | return rc; | 408 | return rc; |
404 | 409 | ||
405 | rc = add_metainfo(ife, i, val, len); | 410 | rc = add_metainfo(ife, i, val, len, exists); |
406 | if (rc) | 411 | if (rc) |
407 | return rc; | 412 | return rc; |
408 | } | 413 | } |
@@ -475,7 +480,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, | |||
475 | saddr = nla_data(tb[TCA_IFE_SMAC]); | 480 | saddr = nla_data(tb[TCA_IFE_SMAC]); |
476 | } | 481 | } |
477 | 482 | ||
478 | spin_lock_bh(&ife->tcf_lock); | 483 | if (exists) |
484 | spin_lock_bh(&ife->tcf_lock); | ||
479 | ife->tcf_action = parm->action; | 485 | ife->tcf_action = parm->action; |
480 | 486 | ||
481 | if (parm->flags & IFE_ENCODE) { | 487 | if (parm->flags & IFE_ENCODE) { |
@@ -505,11 +511,12 @@ metadata_parse_err: | |||
505 | if (ret == ACT_P_CREATED) | 511 | if (ret == ACT_P_CREATED) |
506 | _tcf_ife_cleanup(a, bind); | 512 | _tcf_ife_cleanup(a, bind); |
507 | 513 | ||
508 | spin_unlock_bh(&ife->tcf_lock); | 514 | if (exists) |
515 | spin_unlock_bh(&ife->tcf_lock); | ||
509 | return err; | 516 | return err; |
510 | } | 517 | } |
511 | 518 | ||
512 | err = populate_metalist(ife, tb2); | 519 | err = populate_metalist(ife, tb2, exists); |
513 | if (err) | 520 | if (err) |
514 | goto metadata_parse_err; | 521 | goto metadata_parse_err; |
515 | 522 | ||
@@ -524,12 +531,14 @@ metadata_parse_err: | |||
524 | if (ret == ACT_P_CREATED) | 531 | if (ret == ACT_P_CREATED) |
525 | _tcf_ife_cleanup(a, bind); | 532 | _tcf_ife_cleanup(a, bind); |
526 | 533 | ||
527 | spin_unlock_bh(&ife->tcf_lock); | 534 | if (exists) |
535 | spin_unlock_bh(&ife->tcf_lock); | ||
528 | return err; | 536 | return err; |
529 | } | 537 | } |
530 | } | 538 | } |
531 | 539 | ||
532 | spin_unlock_bh(&ife->tcf_lock); | 540 | if (exists) |
541 | spin_unlock_bh(&ife->tcf_lock); | ||
533 | 542 | ||
534 | if (ret == ACT_P_CREATED) | 543 | if (ret == ACT_P_CREATED) |
535 | tcf_hash_insert(tn, a); | 544 | tcf_hash_insert(tn, a); |
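
The act_ife hunks all hang off one observation: a just-created action is not yet visible to anyone else, so taking ife->tcf_lock for it is unnecessary, while an existing action must be updated under the lock, which in turn forbids sleeping allocations. Hence the exists/atomic flag threaded through and the GFP choice it drives; a sketch of the allocation half, with the ops lookup and list insertion of the real add_metainfo() elided:

    static int add_metainfo_sketch(struct tcf_ife_info *ife, u32 metaid,
                                   void *metaval, int len, bool atomic)
    {
            struct tcf_meta_info *mi;

            /* GFP_ATOMIC while tcf_lock (a BH spinlock) may be held,
             * GFP_KERNEL for a fresh, unlocked action. */
            mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
            if (!mi)
                    return -ENOMEM;

            mi->metaid = metaid;
            /* ops->alloc(mi, metaval, gfp) and list insertion omitted */
            return 0;
    }

use_all_metadata() additionally takes ife_mod_lock for reading so the oplist cannot change under the iteration, which is why its add_metainfo() calls pass atomic = true.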
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 6148e323ed93..b8c50600697a 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -123,10 +123,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); | 125 | td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); |
126 | if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) | 126 | if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { |
127 | if (exists) | ||
128 | tcf_hash_release(a, bind); | ||
127 | return -EINVAL; | 129 | return -EINVAL; |
130 | } | ||
128 | 131 | ||
129 | if (!tcf_hash_check(tn, index, a, bind)) { | 132 | if (!exists) { |
130 | ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind, | 133 | ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind, |
131 | false); | 134 | false); |
132 | if (ret) | 135 | if (ret) |
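
In __tcf_ipt_init() the malformed-target check sits after tcf_hash_check() has taken a reference on an existing action; returning -EINVAL without releasing it leaked that reference. The fixed error path:

    td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
    if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
            if (exists)
                    tcf_hash_release(a, bind);  /* undo tcf_hash_check() */
            return -EINVAL;
    }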
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 6ea0db427f91..baeed6a78d28 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -40,14 +40,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
40 | static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, | 40 | static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
41 | struct sk_buff **to_free) | 41 | struct sk_buff **to_free) |
42 | { | 42 | { |
43 | unsigned int prev_backlog; | ||
44 | |||
43 | if (likely(skb_queue_len(&sch->q) < sch->limit)) | 45 | if (likely(skb_queue_len(&sch->q) < sch->limit)) |
44 | return qdisc_enqueue_tail(skb, sch); | 46 | return qdisc_enqueue_tail(skb, sch); |
45 | 47 | ||
48 | prev_backlog = sch->qstats.backlog; | ||
46 | /* queue full, remove one skb to fulfill the limit */ | 49 | /* queue full, remove one skb to fulfill the limit */ |
47 | __qdisc_queue_drop_head(sch, &sch->q, to_free); | 50 | __qdisc_queue_drop_head(sch, &sch->q, to_free); |
48 | qdisc_qstats_drop(sch); | 51 | qdisc_qstats_drop(sch); |
49 | qdisc_enqueue_tail(skb, sch); | 52 | qdisc_enqueue_tail(skb, sch); |
50 | 53 | ||
54 | qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); | ||
51 | return NET_XMIT_CN; | 55 | return NET_XMIT_CN; |
52 | } | 56 | } |
53 | 57 | ||
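
pfifo_tail_enqueue() drops one skb from the head to admit the new tail, so the queue length is unchanged but its byte backlog shrinks; recording the backlog before the drop lets it report the exact delta upward, with a packet count of 0 since one skb left and one arrived:

    prev_backlog = sch->qstats.backlog;
    __qdisc_queue_drop_head(sch, &sch->q, to_free);
    qdisc_qstats_drop(sch);
    qdisc_enqueue_tail(skb, sch);

    /* 0 packets, prev - cur bytes: only the byte count moved. */
    qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
    return NET_XMIT_CN;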
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index ba098f2654b4..91982d9784b3 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -984,7 +984,9 @@ static void htb_work_func(struct work_struct *work) | |||
984 | struct htb_sched *q = container_of(work, struct htb_sched, work); | 984 | struct htb_sched *q = container_of(work, struct htb_sched, work); |
985 | struct Qdisc *sch = q->watchdog.qdisc; | 985 | struct Qdisc *sch = q->watchdog.qdisc; |
986 | 986 | ||
987 | rcu_read_lock(); | ||
987 | __netif_schedule(qdisc_root(sch)); | 988 | __netif_schedule(qdisc_root(sch)); |
989 | rcu_read_unlock(); | ||
988 | } | 990 | } |
989 | 991 | ||
990 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) | 992 | static int htb_init(struct Qdisc *sch, struct nlattr *opt) |
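
htb_work_func() runs from a workqueue with no qdisc lock held, and qdisc_root() presumably dereferences an RCU-protected pointer on that path, so the read side has to be marked explicitly:

    rcu_read_lock();                    /* protects the qdisc_root() deref */
    __netif_schedule(qdisc_root(sch));
    rcu_read_unlock();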
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 6eac3d880048..aaaf02175338 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -621,17 +621,17 @@ deliver: | |||
621 | #endif | 621 | #endif |
622 | 622 | ||
623 | if (q->qdisc) { | 623 | if (q->qdisc) { |
624 | unsigned int pkt_len = qdisc_pkt_len(skb); | ||
624 | struct sk_buff *to_free = NULL; | 625 | struct sk_buff *to_free = NULL; |
625 | int err; | 626 | int err; |
626 | 627 | ||
627 | err = qdisc_enqueue(skb, q->qdisc, &to_free); | 628 | err = qdisc_enqueue(skb, q->qdisc, &to_free); |
628 | kfree_skb_list(to_free); | 629 | kfree_skb_list(to_free); |
629 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 630 | if (err != NET_XMIT_SUCCESS && |
630 | if (net_xmit_drop_count(err)) { | 631 | net_xmit_drop_count(err)) { |
631 | qdisc_qstats_drop(sch); | 632 | qdisc_qstats_drop(sch); |
632 | qdisc_tree_reduce_backlog(sch, 1, | 633 | qdisc_tree_reduce_backlog(sch, 1, |
633 | qdisc_pkt_len(skb)); | 634 | pkt_len); |
634 | } | ||
635 | } | 635 | } |
636 | goto tfifo_dequeue; | 636 | goto tfifo_dequeue; |
637 | } | 637 | } |
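
After qdisc_enqueue() the skb may already be freed (either consumed by the child qdisc or placed on to_free and released), so qdisc_pkt_len(skb) must not be read afterwards; the hunk caches it up front:

    unsigned int pkt_len = qdisc_pkt_len(skb);  /* before skb can die */
    struct sk_buff *to_free = NULL;
    int err;

    err = qdisc_enqueue(skb, q->qdisc, &to_free);
    kfree_skb_list(to_free);
    if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err)) {
            qdisc_qstats_drop(sch);
            qdisc_tree_reduce_backlog(sch, 1, pkt_len);
    }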
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index f4d443aeae54..8f575899adfa 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -153,8 +153,9 @@ prio_destroy(struct Qdisc *sch) | |||
153 | static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | 153 | static int prio_tune(struct Qdisc *sch, struct nlattr *opt) |
154 | { | 154 | { |
155 | struct prio_sched_data *q = qdisc_priv(sch); | 155 | struct prio_sched_data *q = qdisc_priv(sch); |
156 | struct Qdisc *queues[TCQ_PRIO_BANDS]; | ||
157 | int oldbands = q->bands, i; | ||
156 | struct tc_prio_qopt *qopt; | 158 | struct tc_prio_qopt *qopt; |
157 | int i; | ||
158 | 159 | ||
159 | if (nla_len(opt) < sizeof(*qopt)) | 160 | if (nla_len(opt) < sizeof(*qopt)) |
160 | return -EINVAL; | 161 | return -EINVAL; |
@@ -168,62 +169,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt) | |||
168 | return -EINVAL; | 169 | return -EINVAL; |
169 | } | 170 | } |
170 | 171 | ||
172 | /* Before commit, make sure we can allocate all new qdiscs */ | ||
173 | for (i = oldbands; i < qopt->bands; i++) { | ||
174 | queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, | ||
175 | TC_H_MAKE(sch->handle, i + 1)); | ||
176 | if (!queues[i]) { | ||
177 | while (i > oldbands) | ||
178 | qdisc_destroy(queues[--i]); | ||
179 | return -ENOMEM; | ||
180 | } | ||
181 | } | ||
182 | |||
171 | sch_tree_lock(sch); | 183 | sch_tree_lock(sch); |
172 | q->bands = qopt->bands; | 184 | q->bands = qopt->bands; |
173 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); | 185 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
174 | 186 | ||
175 | for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { | 187 | for (i = q->bands; i < oldbands; i++) { |
176 | struct Qdisc *child = q->queues[i]; | 188 | struct Qdisc *child = q->queues[i]; |
177 | q->queues[i] = &noop_qdisc; | ||
178 | if (child != &noop_qdisc) { | ||
179 | qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); | ||
180 | qdisc_destroy(child); | ||
181 | } | ||
182 | } | ||
183 | sch_tree_unlock(sch); | ||
184 | 189 | ||
185 | for (i = 0; i < q->bands; i++) { | 190 | qdisc_tree_reduce_backlog(child, child->q.qlen, |
186 | if (q->queues[i] == &noop_qdisc) { | 191 | child->qstats.backlog); |
187 | struct Qdisc *child, *old; | 192 | qdisc_destroy(child); |
188 | |||
189 | child = qdisc_create_dflt(sch->dev_queue, | ||
190 | &pfifo_qdisc_ops, | ||
191 | TC_H_MAKE(sch->handle, i + 1)); | ||
192 | if (child) { | ||
193 | sch_tree_lock(sch); | ||
194 | old = q->queues[i]; | ||
195 | q->queues[i] = child; | ||
196 | |||
197 | if (old != &noop_qdisc) { | ||
198 | qdisc_tree_reduce_backlog(old, | ||
199 | old->q.qlen, | ||
200 | old->qstats.backlog); | ||
201 | qdisc_destroy(old); | ||
202 | } | ||
203 | sch_tree_unlock(sch); | ||
204 | } | ||
205 | } | ||
206 | } | 193 | } |
194 | |||
195 | for (i = oldbands; i < q->bands; i++) | ||
196 | q->queues[i] = queues[i]; | ||
197 | |||
198 | sch_tree_unlock(sch); | ||
207 | return 0; | 199 | return 0; |
208 | } | 200 | } |
209 | 201 | ||
210 | static int prio_init(struct Qdisc *sch, struct nlattr *opt) | 202 | static int prio_init(struct Qdisc *sch, struct nlattr *opt) |
211 | { | 203 | { |
212 | struct prio_sched_data *q = qdisc_priv(sch); | 204 | if (!opt) |
213 | int i; | ||
214 | |||
215 | for (i = 0; i < TCQ_PRIO_BANDS; i++) | ||
216 | q->queues[i] = &noop_qdisc; | ||
217 | |||
218 | if (opt == NULL) { | ||
219 | return -EINVAL; | 205 | return -EINVAL; |
220 | } else { | ||
221 | int err; | ||
222 | 206 | ||
223 | if ((err = prio_tune(sch, opt)) != 0) | 207 | return prio_tune(sch, opt); |
224 | return err; | ||
225 | } | ||
226 | return 0; | ||
227 | } | 208 | } |
228 | 209 | ||
229 | static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) | 210 | static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) |
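
prio_tune() is reshaped into prepare/commit: every new band qdisc is created before sch_tree_lock(), where qdisc_create_dflt() is free to sleep, and the pointer swap under the lock can no longer fail; if any allocation fails, the qdiscs already made are unwound and the old bands stay in place. The skeleton:

    /* Prepare: may sleep; old config untouched on failure. */
    for (i = oldbands; i < qopt->bands; i++) {
            queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                          TC_H_MAKE(sch->handle, i + 1));
            if (!queues[i]) {
                    while (i > oldbands)
                            qdisc_destroy(queues[--i]);
                    return -ENOMEM;
            }
    }

    /* Commit: atomic with respect to the tree lock. */
    sch_tree_lock(sch);
    q->bands = qopt->bands;
    /* destroy shrunk bands, install queues[] for grown ones, as above */
    sch_tree_unlock(sch);

This also lets prio_init() collapse to a bare prio_tune() call.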
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 1ce724b87618..f69edcf219e5 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -3,12 +3,6 @@ | |||
3 | #include <linux/sock_diag.h> | 3 | #include <linux/sock_diag.h> |
4 | #include <net/sctp/sctp.h> | 4 | #include <net/sctp/sctp.h> |
5 | 5 | ||
6 | extern void inet_diag_msg_common_fill(struct inet_diag_msg *r, | ||
7 | struct sock *sk); | ||
8 | extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, | ||
9 | struct inet_diag_msg *r, int ext, | ||
10 | struct user_namespace *user_ns); | ||
11 | |||
12 | static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, | 6 | static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, |
13 | void *info); | 7 | void *info); |
14 | 8 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 06b4df9faaa1..2808d550d273 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -446,16 +446,27 @@ out_no_rpciod: | |||
446 | return ERR_PTR(err); | 446 | return ERR_PTR(err); |
447 | } | 447 | } |
448 | 448 | ||
449 | struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | 449 | static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, |
450 | struct rpc_xprt *xprt) | 450 | struct rpc_xprt *xprt) |
451 | { | 451 | { |
452 | struct rpc_clnt *clnt = NULL; | 452 | struct rpc_clnt *clnt = NULL; |
453 | struct rpc_xprt_switch *xps; | 453 | struct rpc_xprt_switch *xps; |
454 | 454 | ||
455 | xps = xprt_switch_alloc(xprt, GFP_KERNEL); | 455 | if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { |
456 | if (xps == NULL) | 456 | WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); |
457 | return ERR_PTR(-ENOMEM); | 457 | xps = args->bc_xprt->xpt_bc_xps; |
458 | 458 | xprt_switch_get(xps); | |
459 | } else { | ||
460 | xps = xprt_switch_alloc(xprt, GFP_KERNEL); | ||
461 | if (xps == NULL) { | ||
462 | xprt_put(xprt); | ||
463 | return ERR_PTR(-ENOMEM); | ||
464 | } | ||
465 | if (xprt->bc_xprt) { | ||
466 | xprt_switch_get(xps); | ||
467 | xprt->bc_xprt->xpt_bc_xps = xps; | ||
468 | } | ||
469 | } | ||
459 | clnt = rpc_new_client(args, xps, xprt, NULL); | 470 | clnt = rpc_new_client(args, xps, xprt, NULL); |
460 | if (IS_ERR(clnt)) | 471 | if (IS_ERR(clnt)) |
461 | return clnt; | 472 | return clnt; |
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | |||
483 | 494 | ||
484 | return clnt; | 495 | return clnt; |
485 | } | 496 | } |
486 | EXPORT_SYMBOL_GPL(rpc_create_xprt); | ||
487 | 497 | ||
488 | /** | 498 | /** |
489 | * rpc_create - create an RPC client and transport with one call | 499 | * rpc_create - create an RPC client and transport with one call |
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
509 | }; | 519 | }; |
510 | char servername[48]; | 520 | char servername[48]; |
511 | 521 | ||
522 | if (args->bc_xprt) { | ||
523 | WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); | ||
524 | xprt = args->bc_xprt->xpt_bc_xprt; | ||
525 | if (xprt) { | ||
526 | xprt_get(xprt); | ||
527 | return rpc_create_xprt(args, xprt); | ||
528 | } | ||
529 | } | ||
530 | |||
512 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) | 531 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) |
513 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; | 532 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; |
514 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) | 533 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index f5572e31d518..4f01f63102ee 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref) | |||
136 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ | 136 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ |
137 | if (xprt->xpt_bc_xprt) | 137 | if (xprt->xpt_bc_xprt) |
138 | xprt_put(xprt->xpt_bc_xprt); | 138 | xprt_put(xprt->xpt_bc_xprt); |
139 | if (xprt->xpt_bc_xps) | ||
140 | xprt_switch_put(xprt->xpt_bc_xps); | ||
139 | xprt->xpt_ops->xpo_free(xprt); | 141 | xprt->xpt_ops->xpo_free(xprt); |
140 | module_put(owner); | 142 | module_put(owner); |
141 | } | 143 | } |
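
For a back-channel client (XPRT_TRANSPORT_BC_TCP) the rpc_xprt, and now the rpc_xprt_switch, are owned by the server-side transport, so rpc_create()/rpc_create_xprt() reuse them with an extra reference instead of allocating a second switch; svc_xprt_free() above gains the balancing put. The reference symmetry, reduced:

    /* Create side: share the existing switch if the svc transport has one. */
    if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
            xps = args->bc_xprt->xpt_bc_xps;
            xprt_switch_get(xps);
    } else {
            xps = xprt_switch_alloc(xprt, GFP_KERNEL);
            /* cache it on the svc transport for the next caller */
            if (xps && xprt->bc_xprt) {
                    xprt_switch_get(xps);
                    xprt->bc_xprt->xpt_bc_xps = xps;
            }
    }

    /* Free side (svc_xprt_free): drop whatever was cached. */
    if (xprt->xpt_bc_xps)
            xprt_switch_put(xprt->xpt_bc_xps);

Making rpc_create_xprt() static follows: the bc_xprt branch added to rpc_create() is now the intended way in for callers that previously used the export.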
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2d3e0c42361e..7e2b2fa189c3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
3057 | return xprt; | 3057 | return xprt; |
3058 | 3058 | ||
3059 | args->bc_xprt->xpt_bc_xprt = NULL; | 3059 | args->bc_xprt->xpt_bc_xprt = NULL; |
3060 | args->bc_xprt->xpt_bc_xps = NULL; | ||
3060 | xprt_put(xprt); | 3061 | xprt_put(xprt); |
3061 | ret = ERR_PTR(-EINVAL); | 3062 | ret = ERR_PTR(-EINVAL); |
3062 | out_err: | 3063 | out_err: |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 9a70e1d744d2..8584cc48654c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -411,7 +411,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, | |||
411 | return 0; | 411 | return 0; |
412 | 412 | ||
413 | /* Send RESET message even if bearer is detached from device */ | 413 | /* Send RESET message even if bearer is detached from device */ |
414 | tipc_ptr = rtnl_dereference(dev->tipc_ptr); | 414 | tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr); |
415 | if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) | 415 | if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) |
416 | goto drop; | 416 | goto drop; |
417 | 417 | ||
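
tipc_l2_send_msg() runs on the transmit path under rcu_read_lock(), not under RTNL; rtnl_dereference() asserts RTNL ownership and would be wrong there, while rcu_dereference_rtnl() accepts either context:

    /* Valid under rcu_read_lock() or RTNL, whichever the caller holds. */
    tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);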
diff --git a/net/tipc/link.c b/net/tipc/link.c index 03f8bdf70d8f..c1df33f878b2 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -705,7 +705,8 @@ static void link_profile_stats(struct tipc_link *l) | |||
705 | */ | 705 | */ |
706 | int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) | 706 | int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) |
707 | { | 707 | { |
708 | int mtyp, rc = 0; | 708 | int mtyp = 0; |
709 | int rc = 0; | ||
709 | bool state = false; | 710 | bool state = false; |
710 | bool probe = false; | 711 | bool probe = false; |
711 | bool setup = false; | 712 | bool setup = false; |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 8740930f0787..17201aa8423d 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include "name_table.h" | 41 | #include "name_table.h" |
42 | 42 | ||
43 | #define MAX_FORWARD_SIZE 1024 | 43 | #define MAX_FORWARD_SIZE 1024 |
44 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) | ||
45 | #define BUF_TAILROOM 16 | ||
44 | 46 | ||
45 | static unsigned int align(unsigned int i) | 47 | static unsigned int align(unsigned int i) |
46 | { | 48 | { |
@@ -505,6 +507,10 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) | |||
505 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); | 507 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); |
506 | } | 508 | } |
507 | 509 | ||
510 | if (skb_cloned(_skb) && | ||
511 | pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) | ||
512 | goto exit; | ||
513 | |||
508 | /* Now reverse the concerned fields */ | 514 | /* Now reverse the concerned fields */ |
509 | msg_set_errcode(hdr, err); | 515 | msg_set_errcode(hdr, err); |
510 | msg_set_origport(hdr, msg_destport(&ohdr)); | 516 | msg_set_origport(hdr, msg_destport(&ohdr)); |
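
tipc_msg_reverse() is about to rewrite header fields of the rejected buffer in place; if the skb is cloned, those writes would corrupt data shared with the other clone, so the buffer is made private first:

    /* Copy-on-write before editing a possibly shared header. */
    if (skb_cloned(_skb) &&
        pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
            goto exit;

    msg_set_errcode(hdr, err);          /* now safe to edit */

Moving BUF_HEADROOM/BUF_TAILROOM into msg.c is the companion change; the msg.h hunk that follows removes the now-private definitions.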
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 024da8af91f0..7cf52fb39bee 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -94,17 +94,6 @@ struct plist; | |||
94 | 94 | ||
95 | #define TIPC_MEDIA_INFO_OFFSET 5 | 95 | #define TIPC_MEDIA_INFO_OFFSET 5 |
96 | 96 | ||
97 | /** | ||
98 | * TIPC message buffer code | ||
99 | * | ||
100 | * TIPC message buffer headroom reserves space for the worst-case | ||
101 | * link-level device header (in case the message is sent off-node). | ||
102 | * | ||
103 | * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields | ||
104 | * are word aligned for quicker access | ||
105 | */ | ||
106 | #define BUF_HEADROOM (LL_MAX_HEADER + 48) | ||
107 | |||
108 | struct tipc_skb_cb { | 97 | struct tipc_skb_cb { |
109 | void *handle; | 98 | void *handle; |
110 | struct sk_buff *tail; | 99 | struct sk_buff *tail; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 88bfcd707064..c49b8df438cb 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, | |||
796 | * @tsk: receiving socket | 796 | * @tsk: receiving socket |
797 | * @skb: pointer to message buffer. | 797 | * @skb: pointer to message buffer. |
798 | */ | 798 | */ |
799 | static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) | 799 | static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, |
800 | struct sk_buff_head *xmitq) | ||
800 | { | 801 | { |
801 | struct sock *sk = &tsk->sk; | 802 | struct sock *sk = &tsk->sk; |
803 | u32 onode = tsk_own_node(tsk); | ||
802 | struct tipc_msg *hdr = buf_msg(skb); | 804 | struct tipc_msg *hdr = buf_msg(skb); |
803 | int mtyp = msg_type(hdr); | 805 | int mtyp = msg_type(hdr); |
804 | bool conn_cong; | 806 | bool conn_cong; |
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) | |||
811 | 813 | ||
812 | if (mtyp == CONN_PROBE) { | 814 | if (mtyp == CONN_PROBE) { |
813 | msg_set_type(hdr, CONN_PROBE_REPLY); | 815 | msg_set_type(hdr, CONN_PROBE_REPLY); |
814 | tipc_sk_respond(sk, skb, TIPC_OK); | 816 | if (tipc_msg_reverse(onode, &skb, TIPC_OK)) |
817 | __skb_queue_tail(xmitq, skb); | ||
815 | return; | 818 | return; |
816 | } else if (mtyp == CONN_ACK) { | 819 | } else if (mtyp == CONN_ACK) { |
817 | conn_cong = tsk_conn_cong(tsk); | 820 | conn_cong = tsk_conn_cong(tsk); |
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) | |||
1686 | * | 1689 | * |
1687 | * Returns true if message was added to socket receive queue, otherwise false | 1690 | * Returns true if message was added to socket receive queue, otherwise false |
1688 | */ | 1691 | */ |
1689 | static bool filter_rcv(struct sock *sk, struct sk_buff *skb) | 1692 | static bool filter_rcv(struct sock *sk, struct sk_buff *skb, |
1693 | struct sk_buff_head *xmitq) | ||
1690 | { | 1694 | { |
1691 | struct socket *sock = sk->sk_socket; | 1695 | struct socket *sock = sk->sk_socket; |
1692 | struct tipc_sock *tsk = tipc_sk(sk); | 1696 | struct tipc_sock *tsk = tipc_sk(sk); |
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb) | |||
1696 | int usr = msg_user(hdr); | 1700 | int usr = msg_user(hdr); |
1697 | 1701 | ||
1698 | if (unlikely(msg_user(hdr) == CONN_MANAGER)) { | 1702 | if (unlikely(msg_user(hdr) == CONN_MANAGER)) { |
1699 | tipc_sk_proto_rcv(tsk, skb); | 1703 | tipc_sk_proto_rcv(tsk, skb, xmitq); |
1700 | return false; | 1704 | return false; |
1701 | } | 1705 | } |
1702 | 1706 | ||
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb) | |||
1739 | return true; | 1743 | return true; |
1740 | 1744 | ||
1741 | reject: | 1745 | reject: |
1742 | tipc_sk_respond(sk, skb, err); | 1746 | if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err)) |
1747 | __skb_queue_tail(xmitq, skb); | ||
1743 | return false; | 1748 | return false; |
1744 | } | 1749 | } |
1745 | 1750 | ||
@@ -1755,9 +1760,24 @@ reject: | |||
1755 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 1760 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
1756 | { | 1761 | { |
1757 | unsigned int truesize = skb->truesize; | 1762 | unsigned int truesize = skb->truesize; |
1763 | struct sk_buff_head xmitq; | ||
1764 | u32 dnode, selector; | ||
1758 | 1765 | ||
1759 | if (likely(filter_rcv(sk, skb))) | 1766 | __skb_queue_head_init(&xmitq); |
1767 | |||
1768 | if (likely(filter_rcv(sk, skb, &xmitq))) { | ||
1760 | atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); | 1769 | atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); |
1770 | return 0; | ||
1771 | } | ||
1772 | |||
1773 | if (skb_queue_empty(&xmitq)) | ||
1774 | return 0; | ||
1775 | |||
1776 | /* Send response/rejected message */ | ||
1777 | skb = __skb_dequeue(&xmitq); | ||
1778 | dnode = msg_destnode(buf_msg(skb)); | ||
1779 | selector = msg_origport(buf_msg(skb)); | ||
1780 | tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector); | ||
1761 | return 0; | 1781 | return 0; |
1762 | } | 1782 | } |
1763 | 1783 | ||
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
1771 | * Caller must hold socket lock | 1791 | * Caller must hold socket lock |
1772 | */ | 1792 | */ |
1773 | static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | 1793 | static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, |
1774 | u32 dport) | 1794 | u32 dport, struct sk_buff_head *xmitq) |
1775 | { | 1795 | { |
1796 | unsigned long time_limit = jiffies + 2; | ||
1797 | struct sk_buff *skb; | ||
1776 | unsigned int lim; | 1798 | unsigned int lim; |
1777 | atomic_t *dcnt; | 1799 | atomic_t *dcnt; |
1778 | struct sk_buff *skb; | 1800 | u32 onode; |
1779 | unsigned long time_limit = jiffies + 2; | ||
1780 | 1801 | ||
1781 | while (skb_queue_len(inputq)) { | 1802 | while (skb_queue_len(inputq)) { |
1782 | if (unlikely(time_after_eq(jiffies, time_limit))) | 1803 | if (unlikely(time_after_eq(jiffies, time_limit))) |
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | |||
1788 | 1809 | ||
1789 | /* Add message directly to receive queue if possible */ | 1810 | /* Add message directly to receive queue if possible */ |
1790 | if (!sock_owned_by_user(sk)) { | 1811 | if (!sock_owned_by_user(sk)) { |
1791 | filter_rcv(sk, skb); | 1812 | filter_rcv(sk, skb, xmitq); |
1792 | continue; | 1813 | continue; |
1793 | } | 1814 | } |
1794 | 1815 | ||
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | |||
1801 | continue; | 1822 | continue; |
1802 | 1823 | ||
1803 | /* Overload => reject message back to sender */ | 1824 | /* Overload => reject message back to sender */ |
1804 | tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD); | 1825 | onode = tipc_own_addr(sock_net(sk)); |
1826 | if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) | ||
1827 | __skb_queue_tail(xmitq, skb); | ||
1805 | break; | 1828 | break; |
1806 | } | 1829 | } |
1807 | } | 1830 | } |
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | |||
1814 | */ | 1837 | */ |
1815 | void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) | 1838 | void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) |
1816 | { | 1839 | { |
1840 | struct sk_buff_head xmitq; | ||
1817 | u32 dnode, dport = 0; | 1841 | u32 dnode, dport = 0; |
1818 | int err; | 1842 | int err; |
1819 | struct tipc_sock *tsk; | 1843 | struct tipc_sock *tsk; |
1820 | struct sock *sk; | 1844 | struct sock *sk; |
1821 | struct sk_buff *skb; | 1845 | struct sk_buff *skb; |
1822 | 1846 | ||
1847 | __skb_queue_head_init(&xmitq); | ||
1823 | while (skb_queue_len(inputq)) { | 1848 | while (skb_queue_len(inputq)) { |
1824 | dport = tipc_skb_peek_port(inputq, dport); | 1849 | dport = tipc_skb_peek_port(inputq, dport); |
1825 | tsk = tipc_sk_lookup(net, dport); | 1850 | tsk = tipc_sk_lookup(net, dport); |
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) | |||
1827 | if (likely(tsk)) { | 1852 | if (likely(tsk)) { |
1828 | sk = &tsk->sk; | 1853 | sk = &tsk->sk; |
1829 | if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { | 1854 | if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { |
1830 | tipc_sk_enqueue(inputq, sk, dport); | 1855 | tipc_sk_enqueue(inputq, sk, dport, &xmitq); |
1831 | spin_unlock_bh(&sk->sk_lock.slock); | 1856 | spin_unlock_bh(&sk->sk_lock.slock); |
1832 | } | 1857 | } |
1858 | /* Send pending response/rejected messages, if any */ | ||
1859 | while ((skb = __skb_dequeue(&xmitq))) { | ||
1860 | dnode = msg_destnode(buf_msg(skb)); | ||
1861 | tipc_node_xmit_skb(net, skb, dnode, dport); | ||
1862 | } | ||
1833 | sock_put(sk); | 1863 | sock_put(sk); |
1834 | continue; | 1864 | continue; |
1835 | } | 1865 | } |
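
Every tipc/socket.c hunk serves one pattern: code running with the socket spinlock held (filter_rcv, tipc_sk_proto_rcv, tipc_sk_enqueue) no longer transmits responses directly, it reverses the message onto a caller-supplied xmitq, and the outermost caller transmits after dropping the lock. The shape, from tipc_sk_rcv():

    struct sk_buff_head xmitq;

    __skb_queue_head_init(&xmitq);

    spin_lock_bh(&sk->sk_lock.slock);
    tipc_sk_enqueue(inputq, sk, dport, &xmitq);   /* may queue rejects */
    spin_unlock_bh(&sk->sk_lock.slock);

    /* Send pending responses only after the lock is released. */
    while ((skb = __skb_dequeue(&xmitq))) {
            dnode = msg_destnode(buf_msg(skb));
            tipc_node_xmit_skb(net, skb, dnode, dport);
    }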
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 80aa6a3e6817..735362c26c8e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i) | |||
315 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { | 315 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { |
316 | struct dentry *dentry = unix_sk(s)->path.dentry; | 316 | struct dentry *dentry = unix_sk(s)->path.dentry; |
317 | 317 | ||
318 | if (dentry && d_backing_inode(dentry) == i) { | 318 | if (dentry && d_real_inode(dentry) == i) { |
319 | sock_hold(s); | 319 | sock_hold(s); |
320 | goto found; | 320 | goto found; |
321 | } | 321 | } |
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net, | |||
911 | err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); | 911 | err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); |
912 | if (err) | 912 | if (err) |
913 | goto fail; | 913 | goto fail; |
914 | inode = d_backing_inode(path.dentry); | 914 | inode = d_real_inode(path.dentry); |
915 | err = inode_permission(inode, MAY_WRITE); | 915 | err = inode_permission(inode, MAY_WRITE); |
916 | if (err) | 916 | if (err) |
917 | goto put_fail; | 917 | goto put_fail; |
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1048 | goto out_up; | 1048 | goto out_up; |
1049 | } | 1049 | } |
1050 | addr->hash = UNIX_HASH_SIZE; | 1050 | addr->hash = UNIX_HASH_SIZE; |
1051 | hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); | 1051 | hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); |
1052 | spin_lock(&unix_table_lock); | 1052 | spin_lock(&unix_table_lock); |
1053 | u->path = u_path; | 1053 | u->path = u_path; |
1054 | list = &unix_socket_table[hash]; | 1054 | list = &unix_socket_table[hash]; |
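
The af_unix switch from d_backing_inode() to d_real_inode() makes bind(), connect() and the by-inode lookup agree on which inode identifies a filesystem socket; on overlayfs the two differ, so a socket bound through the overlay could previously not be found again at connect() time. The gist:

    /* Hash and look up by the real (underlying) inode, not the overlay
     * one, so all three paths use the same key. */
    inode = d_real_inode(path.dentry);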
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index b5f1221f48d4..b96ac918e0ba 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -61,6 +61,14 @@ | |||
61 | * function will also cleanup rejected sockets, those that reach the connected | 61 | * function will also cleanup rejected sockets, those that reach the connected |
62 | * state but leave it before they have been accepted. | 62 | * state but leave it before they have been accepted. |
63 | * | 63 | * |
64 | * - Lock ordering for pending or accept queue sockets is: | ||
65 | * | ||
66 | * lock_sock(listener); | ||
67 | * lock_sock_nested(pending, SINGLE_DEPTH_NESTING); | ||
68 | * | ||
69 | * Using explicit nested locking keeps lockdep happy since normally only one | ||
70 | * lock of a given class may be taken at a time. | ||
71 | * | ||
64 | * - Sockets created by user action will be cleaned up when the user process | 72 | * - Sockets created by user action will be cleaned up when the user process |
65 | * calls close(2), causing our release implementation to be called. Our release | 73 | * calls close(2), causing our release implementation to be called. Our release |
66 | * implementation will perform some cleanup then drop the last reference so our | 74 | * implementation will perform some cleanup then drop the last reference so our |
@@ -443,7 +451,7 @@ void vsock_pending_work(struct work_struct *work) | |||
443 | cleanup = true; | 451 | cleanup = true; |
444 | 452 | ||
445 | lock_sock(listener); | 453 | lock_sock(listener); |
446 | lock_sock(sk); | 454 | lock_sock_nested(sk, SINGLE_DEPTH_NESTING); |
447 | 455 | ||
448 | if (vsock_is_pending(sk)) { | 456 | if (vsock_is_pending(sk)) { |
449 | vsock_remove_pending(listener, sk); | 457 | vsock_remove_pending(listener, sk); |
@@ -1292,7 +1300,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags) | |||
1292 | if (connected) { | 1300 | if (connected) { |
1293 | listener->sk_ack_backlog--; | 1301 | listener->sk_ack_backlog--; |
1294 | 1302 | ||
1295 | lock_sock(connected); | 1303 | lock_sock_nested(connected, SINGLE_DEPTH_NESTING); |
1296 | vconnected = vsock_sk(connected); | 1304 | vconnected = vsock_sk(connected); |
1297 | 1305 | ||
1298 | /* If the listener socket has received an error, then we should | 1306 | /* If the listener socket has received an error, then we should |
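
The vsock change encodes the documented order (listener first, then the pending or accepted child); lock_sock_nested() with SINGLE_DEPTH_NESTING tells lockdep that the second sk_lock of the same class is intentional:

    lock_sock(listener);                          /* outer */
    lock_sock_nested(sk, SINGLE_DEPTH_NESTING);   /* inner, fixed order */

    /* ... handle the pending/accepted socket ... */

    release_sock(sk);
    release_sock(listener);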
diff --git a/net/wireless/util.c b/net/wireless/util.c index 4e809e978b7d..2443ee30ba5b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr, | |||
509 | * replace EtherType */ | 509 | * replace EtherType */ |
510 | hdrlen += ETH_ALEN + 2; | 510 | hdrlen += ETH_ALEN + 2; |
511 | else | 511 | else |
512 | tmp.h_proto = htons(skb->len); | 512 | tmp.h_proto = htons(skb->len - hdrlen); |
513 | 513 | ||
514 | pskb_pull(skb, hdrlen); | 514 | pskb_pull(skb, hdrlen); |
515 | 515 | ||
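
In __ieee80211_data_to_8023() the fallthrough case builds an 802.3 length-style EtherType; skb->len still includes the not-yet-pulled 802.11 header at that point, so the stored length must subtract hdrlen:

    tmp.h_proto = htons(skb->len - hdrlen); /* payload bytes only */
    pskb_pull(skb, hdrlen);                 /* then strip the 802.11 header */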