Diffstat (limited to 'net')
 32 files changed, 387 insertions(+), 179 deletions(-)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159bf607f..8323bced8e5b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
 	size = bat_priv->num_ifaces * sizeof(uint8_t);
 	orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
 	if (!orig_node->bat_iv.bcast_own_sum)
-		goto free_bcast_own;
+		goto free_orig_node;
 
 	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
 				     batadv_choose_orig, orig_node,
 				     &orig_node->hash_entry);
 	if (hash_added != 0)
-		goto free_bcast_own;
+		goto free_orig_node;
 
 	return orig_node;
 
-free_bcast_own:
-	kfree(orig_node->bat_iv.bcast_own);
 free_orig_node:
+	/* free twice, as batadv_orig_node_new sets refcount to 2 */
+	batadv_orig_node_free_ref(orig_node);
 	batadv_orig_node_free_ref(orig_node);
 
 	return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
 			struct batadv_orig_node *orig_neigh)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct batadv_neigh_node *neigh_node;
+	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
 
 	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
 	if (!neigh_node)
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
 	neigh_node->orig_node = orig_neigh;
 	neigh_node->if_incoming = hard_iface;
 
-	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
-		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
-
 	spin_lock_bh(&orig_node->neigh_list_lock);
-	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
+					       neigh_addr);
+	if (!tmp_neigh_node) {
+		hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	} else {
+		kfree(neigh_node);
+		batadv_hardif_free_ref(hard_iface);
+		neigh_node = tmp_neigh_node;
+	}
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 
+	if (!tmp_neigh_node)
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+			   neigh_addr, orig_node->orig,
+			   hard_iface->net_dev->name);
+
 out:
 	return neigh_node;
 }
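Note on the bat_iv_ogm.c hunks above: the lookup (batadv_neigh_node_get) and the list insertion now happen under the same neigh_list_lock, so two OGMs processed in parallel can no longer both add a neighbour for the same (interface, address) pair; the loser of the race frees its candidate and reuses the existing entry. A minimal userspace sketch of that check-then-insert pattern, with hypothetical types and a plain pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct neigh {
	struct neigh *next;
	unsigned char addr[6];
};

static struct neigh *neigh_list;
static pthread_mutex_t neigh_lock = PTHREAD_MUTEX_INITIALIZER;

/* must be called with neigh_lock held */
static struct neigh *neigh_get_locked(const unsigned char *addr)
{
	struct neigh *n;

	for (n = neigh_list; n; n = n->next)
		if (!memcmp(n->addr, addr, 6))
			return n;
	return NULL;
}

struct neigh *neigh_add(const unsigned char *addr)
{
	struct neigh *n = malloc(sizeof(*n));
	struct neigh *tmp;

	if (!n)
		return NULL;
	memcpy(n->addr, addr, 6);

	pthread_mutex_lock(&neigh_lock);
	tmp = neigh_get_locked(addr);
	if (!tmp) {
		n->next = neigh_list;	/* first caller wins */
		neigh_list = n;
	} else {
		free(n);		/* lost the race: drop our candidate */
		n = tmp;		/* and reuse the existing entry */
	}
	pthread_mutex_unlock(&neigh_lock);

	return n;
}

int main(void)
{
	unsigned char a[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	/* the second call finds the first entry instead of duplicating it */
	return neigh_add(a) == neigh_add(a) ? 0 : 1;
}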
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3641c6..b851cc580853 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	const struct batadv_hard_iface *hard_iface;
-	int min_mtu = ETH_DATA_LEN;
+	int min_mtu = INT_MAX;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
 	}
 	rcu_read_unlock();
 
-	atomic_set(&bat_priv->packet_size_max, min_mtu);
-
 	if (atomic_read(&bat_priv->fragmentation) == 0)
 		goto out;
 
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
 	min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
 	min_mtu -= sizeof(struct batadv_frag_packet);
 	min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
-	atomic_set(&bat_priv->packet_size_max, min_mtu);
-
-	/* with fragmentation enabled we can fragment external packets easily */
-	min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
 
 out:
-	return min_mtu - batadv_max_header_len();
+	/* report to the other components the maximum amount of bytes that
+	 * batman-adv can send over the wire (without considering the payload
+	 * overhead). For example, this value is used by TT to compute the
+	 * maximum local table size
+	 */
+	atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+	/* the real soft-interface MTU is computed by removing the payload
+	 * overhead from the maximum amount of bytes that was just computed.
+	 *
+	 * However batman-adv does not support MTUs bigger than ETH_DATA_LEN
+	 */
+	return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
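Worked example of the new batadv_hardif_min_mtu() arithmetic, using illustrative constants (a 20-byte fragment header, BATADV_FRAG_MAX_FRAG_SIZE = 1400 and BATADV_FRAG_MAX_FRAGMENTS = 16 are assumptions here; check packet.h in the tree you build against): with fragmentation enabled and a 1500-byte hard interface, packet_size_max becomes (1400 - 20) * 16 = 22080, while the returned soft-interface MTU is still capped at ETH_DATA_LEN.

#include <stdio.h>

int main(void)
{
	int min_mtu = 1500;	/* smallest hard-interface MTU found */

	if (min_mtu > 1400)	/* BATADV_FRAG_MAX_FRAG_SIZE, illustrative */
		min_mtu = 1400;
	min_mtu -= 20;		/* sizeof(struct batadv_frag_packet), illustrative */
	min_mtu *= 16;		/* BATADV_FRAG_MAX_FRAGMENTS, illustrative */
	printf("packet_size_max = %d\n", min_mtu);	/* 22080: what TT sizing sees */

	int soft_mtu = min_mtu - 32;	/* batadv_max_header_len(), illustrative */
	if (soft_mtu > 1500)		/* ETH_DATA_LEN cap */
		soft_mtu = 1500;
	printf("soft-interface MTU = %d\n", soft_mtu);	/* 1500 */
	return 0;
}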
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2e3605..853941629dc1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@ out:
 }
 
 /**
+ * batadv_neigh_node_get - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ * Returns NULL if the neighbour is not found.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+		      const struct batadv_hard_iface *hard_iface,
+		      const uint8_t *addr)
+{
+	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+			continue;
+
+		if (tmp_neigh_node->if_incoming != hard_iface)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+			continue;
+
+		res = tmp_neigh_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	return res;
+}
+
+/**
  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
  * @rcu: rcu pointer of the orig_ifinfo object
  */
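The atomic_inc_not_zero() test in batadv_neigh_node_get() above is the usual RCU lookup idiom: a reader may find a node whose last reference is concurrently being dropped, so it takes a reference only if the count is still non-zero and otherwise skips the dying entry. A userspace analog of that primitive, sketched with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static bool refcount_inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		/* bump the count only if nobody dropped it to zero meanwhile;
		 * on failure, old is reloaded and the loop retries */
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	}
	return false;	/* object is already being freed: skip it */
}

int main(void)
{
	atomic_int ref = 1;

	return refcount_inc_not_zero(&ref) ? 0 : 1;	/* takes a ref: now 2 */
}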
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290f63f6..db3a9ed734cb 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 					      const uint8_t *addr);
 struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+		      const struct batadv_hard_iface *hard_iface,
+		      const uint8_t *addr);
+struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
 		      const uint8_t *neigh_addr,
 		      struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c9ecea..a953d5b196a3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 	int is_old_ttvn;
 
 	/* check if there is enough data before accessing it */
-	if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
+	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
 		return 0;
 
 	/* create a copy of the skb (in case of for re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
 
 	if (ret != NET_RX_SUCCESS)
 		ret = batadv_route_unicast_packet(skb, recv_if);
+	else
+		consume_skb(skb);
 
 	return ret;
 }
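The pskb_may_pull() fix above works because that helper returns a boolean-style int (nonzero when the requested bytes can be made linearly available, 0 when not), never a negative errno, so the old "< 0" test could never fire and short packets were parsed anyway. A tiny standalone illustration of why the guard had to become a "!" test:

#include <stdio.h>

/* pskb_may_pull()-style return: nonzero on success, 0 on failure --
 * never negative, so "ret < 0" is always false */
static int may_pull(int linear, unsigned int need)
{
	return (unsigned int)linear >= need;
}

int main(void)
{
	printf("old test fires: %d\n", may_pull(0, 14) < 0);	/* 0: bug never caught */
	printf("new test fires: %d\n", !may_pull(0, 14));	/* 1: short packet dropped */
	return 0;
}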
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f00a385..843febd1e519 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 				   struct batadv_orig_node *orig_node,
 				   unsigned short vid)
 {
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+	struct ethhdr *ethhdr;
 	struct batadv_unicast_packet *unicast_packet;
-	int ret = NET_XMIT_DROP;
+	int ret = NET_XMIT_DROP, hdr_size;
 
 	if (!orig_node)
 		goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 	case BATADV_UNICAST:
 		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
 			goto out;
+
+		hdr_size = sizeof(*unicast_packet);
 		break;
 	case BATADV_UNICAST_4ADDR:
 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
 							   orig_node,
 							   packet_subtype))
 			goto out;
+
+		hdr_size = sizeof(struct batadv_unicast_4addr_packet);
 		break;
 	default:
 		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 		goto out;
 	}
 
+	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f675a3e..959dde721c46 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 	struct hlist_head *head;
 	uint32_t i, crc_tmp, crc = 0;
 	uint8_t flags;
+	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
 						orig_node))
 				continue;
 
-			crc_tmp = crc32c(0, &tt_common->vid,
-					 sizeof(tt_common->vid));
+			/* use network order to read the VID: this ensures that
+			 * every node reads the bytes in the same order.
+			 */
+			tmp_vid = htons(tt_common->vid);
+			crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
 			/* compute the CRC on flags that have to be kept in sync
 			 * among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
 	struct hlist_head *head;
 	uint32_t i, crc_tmp, crc = 0;
 	uint8_t flags;
+	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
 		if (tt_common->flags & BATADV_TT_CLIENT_NEW)
 			continue;
 
-		crc_tmp = crc32c(0, &tt_common->vid,
-				 sizeof(tt_common->vid));
+		/* use network order to read the VID: this ensures that
+		 * every node reads the bytes in the same order.
+		 */
+		tmp_vid = htons(tt_common->vid);
+		crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
 
 		/* compute the CRC on flags that have to be kept in sync
 		 * among nodes
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
 	struct batadv_orig_node_vlan *vlan;
+	uint32_t crc;
 	int i;
 
 	/* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 		if (!vlan)
 			return false;
 
-		if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+		crc = vlan->tt.crc;
+		batadv_orig_node_vlan_free_ref(vlan);
+
+		if (crc != ntohl(tt_vlan_tmp->crc))
 			return false;
 	}
 
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
 
 	spin_lock_bh(&orig_node->tt_lock);
 
-	tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
 	batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
 				 ttvn, tt_change);
 
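On the translation-table CRC hunks above: crc32c() runs over raw in-memory bytes, so a little-endian and a big-endian node hashing the same host-order VID would feed the CRC different byte sequences and disagree about the table checksum. Normalizing through htons() first makes every node hash the same two bytes. A standalone sketch of the byte-order effect:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t vid = 0x0123;
	uint16_t wire = htons(vid);	/* bytes 01 23 on every host */
	unsigned char raw[2], net[2];

	memcpy(raw, &vid, 2);	/* 23 01 on little-endian, 01 23 on big-endian */
	memcpy(net, &wire, 2);	/* 01 23 everywhere */
	printf("host order bytes: %02x %02x\n", raw[0], raw[1]);
	printf("wire order bytes: %02x %02x\n", net[0], net[1]);
	return 0;
}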
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619db896..d9fb93451442 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session)
 		del_timer(&session->timer);
 }
 
+static void hidp_process_report(struct hidp_session *session,
+				int type, const u8 *data, int len, int intr)
+{
+	if (len > HID_MAX_BUFFER_SIZE)
+		len = HID_MAX_BUFFER_SIZE;
+
+	memcpy(session->input_buf, data, len);
+	hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
 static void hidp_process_handshake(struct hidp_session *session,
 				   unsigned char param)
 {
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
 		hidp_input_report(session, skb);
 
 		if (session->hid)
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 0);
 		break;
 
 	case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
 		hidp_input_report(session, skb);
 
 		if (session->hid) {
-			hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+			hidp_process_report(session, HID_INPUT_REPORT,
+					    skb->data, skb->len, 1);
 			BT_DBG("report len %d", skb->len);
 		}
 	} else {
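The hidp change above routes all incoming reports through a bounded, session-owned buffer: the length is clamped to HID_MAX_BUFFER_SIZE before the memcpy, so a consumer that assumes a full-size buffer can never read or write past the received frame. A userspace sketch of the clamp-then-copy idiom (BUF_SIZE is an illustrative stand-in):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64	/* stands in for HID_MAX_BUFFER_SIZE */

struct session {
	unsigned char input_buf[BUF_SIZE];
};

static void process_report(struct session *s, const unsigned char *data,
			   size_t len)
{
	if (len > BUF_SIZE)
		len = BUF_SIZE;		/* truncate instead of overflowing */

	memcpy(s->input_buf, data, len);
	/* consumers may now safely assume input_buf is BUF_SIZE bytes */
	printf("consumed %zu bytes\n", len);
}

int main(void)
{
	struct session s;
	unsigned char frame[200] = { 0 };

	process_report(&s, frame, sizeof(frame));	/* clamped to 64 */
	return 0;
}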
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab5241400cf7..8798492a6e99 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
 #define __HIDP_H
 
 #include <linux/types.h>
+#include <linux/hid.h>
 #include <linux/kref.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@ struct hidp_session {
 
 	/* Used in hidp_output_raw_report() */
 	int output_report_success; /* boolean */
+
+	/* temporary input buffer */
+	u8 input_buf[HID_MAX_BUFFER_SIZE];
 };
 
 /* HIDP init defines */
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ad1b78c9c77..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
 	int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-	netdev_features_t features)
+					    const struct net_device *dev,
+					    netdev_features_t features)
 {
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(skb->dev, skb)) {
+	} else if (illegal_highdma(dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
 
 	return features;
 }
 
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+					 const struct net_device *dev)
 {
 	__be16 protocol = skb->protocol;
-	netdev_features_t features = skb->dev->features;
+	netdev_features_t features = dev->features;
 
-	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+	if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
 		features &= ~NETIF_F_GSO_MASK;
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	} else if (!vlan_tx_tag_present(skb)) {
-		return harmonize_features(skb, features);
+		return harmonize_features(skb, dev, features);
 	}
 
-	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+	features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
 		     NETIF_F_HW_VLAN_STAG_TX);
 
 	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 			NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 			NETIF_F_HW_VLAN_STAG_TX;
 
-	return harmonize_features(skb, features);
+	return harmonize_features(skb, dev, features);
 }
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
 	return poff;
 }
 
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
-	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
-				     dev->name, queue_index,
-				     dev->real_num_tx_queues);
-		return 0;
-	}
-	return queue_index;
-}
-
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
 	return queue_index;
 }
-EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb,
-							    accel_priv);
+			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+							    __netdev_pick_tx);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
 
 		if (!accel_priv)
-			queue_index = dev_cap_txqueue(dev, queue_index);
+			queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 048dc8d183aa..1a0dac2ef9ad 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1963,16 +1963,21 @@ replay:
 
 		dev->ifindex = ifm->ifi_index;
 
-		if (ops->newlink)
+		if (ops->newlink) {
 			err = ops->newlink(net, dev, tb, data);
-		else
+			/* Drivers should call free_netdev() in ->destructor
+			 * and unregister it on failure so that device could be
+			 * finally freed in rtnl_unlock.
+			 */
+			if (err < 0)
+				goto out;
+		} else {
 			err = register_netdevice(dev);
-
-		if (err < 0) {
-			free_netdev(dev);
-			goto out;
-		}
-
+			if (err < 0) {
+				free_netdev(dev);
+				goto out;
+			}
+		}
 		err = rtnl_configure_link(dev, ifm);
 		if (err < 0)
 			unregister_netdevice(dev);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81a1f3e..62b5828acde0 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
 #include "tfrc.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-static bool tfrc_debug;
+bool tfrc_debug;
 module_param(tfrc_debug, bool, 0644);
 MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
 #endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c76ae0..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
 #include "packet_history.h"
 
 #ifdef CONFIG_IP_DCCP_TFRC_DEBUG
+extern bool tfrc_debug;
 #define tfrc_pr_debug(format, a...)	DCCP_PR_DEBUG(tfrc_debug, format, ##a)
 #else
 #define tfrc_pr_debug(format, a...)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..f3869c186d97 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+	return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+	       !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+	unsigned int mtu;
+
+	if (skb->local_df || !skb_is_gso(skb))
+		return false;
+
+	mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+	/* if seglen > mtu, do software segmentation for IP fragmentation on
+	 * output. DF bit cannot be set since ip_forward would have sent
+	 * icmp error.
+	 */
+	return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	netdev_features_t features;
+	struct sk_buff *segs;
+	int ret = 0;
+
+	features = netif_skb_dev_features(skb, dst->dev);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	do {
+		struct sk_buff *nskb = segs->next;
+		int err;
+
+		segs->next = NULL;
+		err = dst_output(segs);
+
+		if (err && ret == 0)
+			ret = err;
+		segs = nskb;
+	} while (segs);
+
+	return ret;
+}
+
 static int ip_forward_finish(struct sk_buff *skb)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
+	if (ip_gso_exceeds_dst_mtu(skb))
+		return ip_forward_finish_gso(skb);
+
 	return dst_output(skb);
 }
 
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
 	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-	if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
-		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+	if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
 		IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
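The ip_forward.c refactor above replaces one hard-to-read compound condition with two named predicates; the behavioural point is that an over-MTU GSO packet whose individual segments fit the next hop is forwarded (segmented in software by ip_forward_finish_gso) instead of triggering ICMP_FRAG_NEEDED. A standalone sketch of the resulting decision, with simplified stand-ins for the skb fields involved:

#include <stdbool.h>
#include <stdio.h>

struct pkt {
	unsigned int len;
	bool df;			/* IP_DF set in the header */
	bool local_df;			/* stack allows local fragmentation */
	bool gso;			/* segmentation-offload packet */
	unsigned int gso_seglen;	/* per-segment size on the wire */
};

static bool may_fragment(const struct pkt *p)
{
	return !p->df || p->local_df;
}

static bool exceeds_mtu(const struct pkt *p, unsigned int mtu)
{
	if (p->len <= mtu || p->local_df)
		return false;
	if (p->gso && p->gso_seglen <= mtu)
		return false;	/* each segment fits after software GSO */
	return true;
}

int main(void)
{
	struct pkt p = { .len = 4000, .df = true, .gso = true,
			 .gso_seglen = 1400 };

	/* DF set, but the GSO segments fit: forward-and-segment instead
	 * of sending ICMP_FRAG_NEEDED */
	printf("send frag-needed: %d\n",
	       !may_fragment(&p) && exceeds_mtu(&p, 1500));	/* 0 */
	return 0;
}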
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138fa523..b3e86ea7b71b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
 
 		msleep(1);
 
-		if time_before(jiffies, next_msg)
+		if (time_before(jiffies, next_msg))
 			continue;
 
 		elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b48921c..4c011ec69ed4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1597,6 +1597,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->rt_gateway = 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+	RT_CACHE_STAT_INC(in_slow_tot);
 
 	rth->dst.input = ip_forward;
 	rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res);
-	if (err != 0)
+	if (err != 0) {
+		if (!IN_DEV_FORWARD(in_dev))
+			err = -EHOSTUNREACH;
 		goto no_route;
-
-	RT_CACHE_STAT_INC(in_slow_tot);
+	}
 
 	if (res.type == RTN_BROADCAST)
 		goto brd_input;
@@ -1712,8 +1714,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		goto local_input;
 	}
 
-	if (!IN_DEV_FORWARD(in_dev))
+	if (!IN_DEV_FORWARD(in_dev)) {
+		err = -EHOSTUNREACH;
 		goto no_route;
+	}
 	if (res.type != RTN_UNICAST)
 		goto martian_destination;
 
@@ -1768,6 +1772,7 @@ local_input:
 	rth->rt_gateway = 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+	RT_CACHE_STAT_INC(in_slow_tot);
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
 		rth->dst.error= -err;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad235690684c..fdbfeca36d63 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2783,6 +2783,8 @@ static void addrconf_gre_config(struct net_device *dev)
 	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 	if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
 		addrconf_add_linklocal(idev, &addr);
+	else
+		addrconf_prefix_route(&addr, 64, dev, 0, 0);
 }
 #endif
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..070a2fae2375 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 	return mtu;
 }
 
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu || skb->local_df)
+		return false;
+
+	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+		return true;
+
+	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+		return false;
+
+	return true;
+}
+
 int ip6_forward(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 
-	if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
-	    (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+	if (ip6_pkt_too_big(skb, mtu)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d6d1f1df9119..ce1c44370610 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1057,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
 					 struct sk_buff *skb,
-					 void *accel_priv)
+					 void *accel_priv,
+					 select_queue_fallback_t fallback)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1075,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 					  struct sk_buff *skb,
-					  void *accel_priv)
+					  void *accel_priv,
+					  select_queue_fallback_t fallback)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37506c5..48a6a93db296 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
 	return po->xmit == packet_direct_xmit;
 }
 
-static u16 packet_pick_tx_queue(struct net_device *dev)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 }
 
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+	u16 queue_index;
+
+	if (ops->ndo_select_queue) {
+		queue_index = ops->ndo_select_queue(dev, skb, NULL,
+						    __packet_pick_tx_queue);
+		queue_index = netdev_cap_txqueue(dev, queue_index);
+	} else {
+		queue_index = __packet_pick_tx_queue(dev, skb);
+	}
+
+	skb_set_queue_mapping(skb, queue_index);
+}
+
 /* register_prot_hook must be invoked with the po->bind_lock held,
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		}
 	}
 
-	skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+	packet_pick_tx_queue(dev, skb);
+
 	skb->destructor = tpacket_destruct_skb;
 	__packet_set_status(po, ph, TP_STATUS_SENDING);
 	packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+
+	packet_pick_tx_queue(dev, skb);
 
 	if (po->has_vnet_hdr) {
 		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		 */
 		if (!tx_ring)
 			init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
-			break;
+		break;
 	default:
 		break;
 	}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d0200a59..fefeeb73f15f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
  *
  * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
  * University of Oslo, Norway.
+ *
+ * References:
+ * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
+ * IEEE Conference on High Performance Switching and Routing 2013 :
+ * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
  */
 
 #include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
 	psched_time_t target;	/* user specified target delay in pschedtime */
 	u32 tupdate;		/* timer frequency (in jiffies) */
 	u32 limit;		/* number of packets that can be enqueued */
-	u32 alpha;		/* alpha and beta are between -4 and 4 */
+	u32 alpha;		/* alpha and beta are between 0 and 32 */
 	u32 beta;		/* and are used for shift relative to 1 */
 	bool ecn;		/* true if ecn is enabled */
 	bool bytemode;		/* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
 	if (qdelay == 0 && qlen != 0)
 		update_prob = false;
 
-	/* Add ranges for alpha and beta, more aggressive for high dropping
-	 * mode and gentle steps for light dropping mode
-	 * In light dropping mode, take gentle steps; in medium dropping mode,
-	 * take medium steps; in high dropping mode, take big steps.
+	/* In the algorithm, alpha and beta are between 0 and 2 with typical
+	 * value for alpha as 0.125. In this implementation, we use values 0-32
+	 * passed from user space to represent this. Also, alpha and beta have
+	 * unit of HZ and need to be scaled before they can be used to update
+	 * probability. alpha/beta are updated locally below by 1) scaling them
+	 * appropriately 2) scaling down by 16 to come to 0-2 range.
+	 * Please see paper for details.
+	 *
+	 * We scale alpha and beta differently depending on whether we are in
+	 * light, medium or high dropping mode.
 	 */
 	if (q->vars.prob < MAX_PROB / 100) {
 		alpha =
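Worked example of the scaling described in the rewritten comment above: the paper's alpha and beta live in [0, 2] with a typical alpha of 0.125; user space passes integers in [0, 32], and the implementation scales them down by 16 when updating the drop probability, so a netlink value of 2 corresponds to the paper's 0.125:

#include <stdio.h>

int main(void)
{
	unsigned int alpha_user = 2;	/* value passed via netlink */

	/* 2 / 16 = 0.125, the paper's typical alpha */
	printf("effective alpha = %f\n", alpha_user / 16.0);
	return 0;
}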
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..f558433537b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1367,44 +1367,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
 	return false;
 }
 
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
 {
+	int rx_count;
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
 
-	if (asoc->rwnd_over) {
-		if (asoc->rwnd_over >= len) {
-			asoc->rwnd_over -= len;
-		} else {
-			asoc->rwnd += (len - asoc->rwnd_over);
-			asoc->rwnd_over = 0;
-		}
-	} else {
-		asoc->rwnd += len;
-	}
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
 
-	/* If we had window pressure, start recovering it
-	 * once our rwnd had reached the accumulated pressure
-	 * threshold. The idea is to recover slowly, but up
-	 * to the initial advertised window.
-	 */
-	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
-		int change = min(asoc->pathmtu, asoc->rwnd_press);
-		asoc->rwnd += change;
-		asoc->rwnd_press -= change;
-	}
+	if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+		asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+	else
+		asoc->rwnd = 0;
 
-	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->a_rwnd);
+	pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+		 __func__, asoc, asoc->rwnd, rx_count,
+		 asoc->base.sk->sk_rcvbuf);
 
 	/* Send a window update SACK if the rwnd has increased by at least the
 	 * minimum of the association's PMTU and half of the receive buffer.
 	 * The algorithm used is similar to the one described in
 	 * Section 4.2.3.3 of RFC 1122.
 	 */
-	if (sctp_peer_needs_update(asoc)) {
+	if (update_peer && sctp_peer_needs_update(asoc)) {
 		asoc->a_rwnd = asoc->rwnd;
 
 		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1417,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 	}
 }
 
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
-	int rx_count;
-	int over = 0;
-
-	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
-		pr_debug("%s: association:%p has asoc->rwnd:%u, "
-			 "asoc->rwnd_over:%u!\n", __func__, asoc,
-			 asoc->rwnd, asoc->rwnd_over);
-
-	if (asoc->ep->rcvbuf_policy)
-		rx_count = atomic_read(&asoc->rmem_alloc);
-	else
-		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
-	/* If we've reached or overflowed our receive buffer, announce
-	 * a 0 rwnd if rwnd would still be positive. Store the
-	 * the potential pressure overflow so that the window can be restored
-	 * back to original value.
-	 */
-	if (rx_count >= asoc->base.sk->sk_rcvbuf)
-		over = 1;
-
-	if (asoc->rwnd >= len) {
-		asoc->rwnd -= len;
-		if (over) {
-			asoc->rwnd_press += asoc->rwnd;
-			asoc->rwnd = 0;
-		}
-	} else {
-		asoc->rwnd_over = len - asoc->rwnd;
-		asoc->rwnd = 0;
-	}
-
-	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
-		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
-		 asoc->rwnd_press);
-}
 
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
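Worked example of the new sctp_assoc_rwnd_update() computation above: instead of incrementally tracking rwnd/rwnd_over/rwnd_press, the advertised window is now approximated directly as half of the receive-buffer space currently free, clamped at zero:

#include <stdio.h>

int main(void)
{
	int sk_rcvbuf = 212992;	/* illustrative socket receive-buffer size */
	int rx_count = 50000;	/* bytes currently charged to the socket */
	unsigned int rwnd;

	if (sk_rcvbuf - rx_count > 0)
		rwnd = (unsigned int)(sk_rcvbuf - rx_count) >> 1;
	else
		rwnd = 0;	/* buffer full or overflowed: advertise 0 */

	printf("rwnd = %u\n", rwnd);	/* (212992 - 50000) / 2 = 81496 */
	return 0;
}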
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..591b44d3b7de 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -6176,7 +6176,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * PMTU.  In cases, such as loopback, this might be a rather
 	 * large spill over.
 	 */
-	if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+	if ((!chunk->data_accepted) && (!asoc->rwnd ||
 	    (datalen > asoc->rwnd + asoc->frag_point))) {
 
 		/* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
 #include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -1368,11 +1369,19 @@
 /*
  * New (hopefully final) interface for the API.
  * We use the sctp_getaddrs_old structure so that use-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
  * is that we store the actual length of the address buffer into the
  * addrs_num structure member. That way we can re-use the existing
  * code.
  */
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+	sctp_assoc_t	assoc_id;
+	s32		addr_num;
+	compat_uptr_t	addrs;		/* struct sockaddr * */
+};
+#endif
+
 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 				     char __user *optval,
 				     int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 	sctp_assoc_t assoc_id = 0;
 	int err = 0;
 
-	if (len < sizeof(param))
-		return -EINVAL;
+#ifdef CONFIG_COMPAT
+	if (is_compat_task()) {
+		struct compat_sctp_getaddrs_old param32;
 
-	if (copy_from_user(&param, optval, sizeof(param)))
-		return -EFAULT;
+		if (len < sizeof(param32))
+			return -EINVAL;
+		if (copy_from_user(&param32, optval, sizeof(param32)))
+			return -EFAULT;
 
-	err = __sctp_setsockopt_connectx(sk,
-			(struct sockaddr __user *)param.addrs,
-			param.addr_num, &assoc_id);
+		param.assoc_id = param32.assoc_id;
+		param.addr_num = param32.addr_num;
+		param.addrs = compat_ptr(param32.addrs);
+	} else
+#endif
+	{
+		if (len < sizeof(param))
+			return -EINVAL;
+		if (copy_from_user(&param, optval, sizeof(param)))
+			return -EFAULT;
+	}
 
+	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+					 param.addrs, param.addr_num,
+					 &assoc_id);
 	if (err == 0 || err == -EINPROGRESS) {
 		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
 			return -EFAULT;
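Why the compat branch added above is needed: for a 32-bit task on a 64-bit kernel, the old-style getaddrs structure has a 4-byte addrs pointer, so copying sizeof(the native struct) from user space would both over-read the caller's buffer and misinterpret the pointer field. An illustrative userspace sketch of the two layouts (types approximate the kernel's):

#include <stdio.h>
#include <stdint.h>

struct getaddrs_old_64 {
	int32_t assoc_id;
	int32_t addr_num;
	void *addrs;		/* 8 bytes on a 64-bit kernel */
};

struct getaddrs_old_32 {
	int32_t assoc_id;
	int32_t addr_num;
	uint32_t addrs;		/* compat_uptr_t: always 4 bytes */
};

int main(void)
{
	/* typically 16 vs 12 bytes: reading the 64-bit layout from a
	 * 32-bit caller runs past its buffer and misparses addrs */
	printf("%zu vs %zu\n", sizeof(struct getaddrs_old_64),
	       sizeof(struct getaddrs_old_32));
	return 0;
}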
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
2092 | sctp_skb_pull(skb, copied); | 2115 | sctp_skb_pull(skb, copied); |
2093 | skb_queue_head(&sk->sk_receive_queue, skb); | 2116 | skb_queue_head(&sk->sk_receive_queue, skb); |
2094 | 2117 | ||
2095 | /* When only partial message is copied to the user, increase | ||
2096 | * rwnd by that amount. If all the data in the skb is read, | ||
2097 | * rwnd is updated when the event is freed. | ||
2098 | */ | ||
2099 | if (!sctp_ulpevent_is_notification(event)) | ||
2100 | sctp_assoc_rwnd_increase(event->asoc, copied); | ||
2101 | goto out; | 2118 | goto out; |
2102 | } else if ((event->msg_flags & MSG_NOTIFICATION) || | 2119 | } else if ((event->msg_flags & MSG_NOTIFICATION) || |
2103 | (event->msg_flags & MSG_EOR)) | 2120 | (event->msg_flags & MSG_EOR)) |
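
This removal pairs with the ulpevent.c change further down: the window is no longer nudged up and down by the byte count of each partial read. My reading of the change (an interpretation, not the kernel's exact formula) is that sctp_assoc_rwnd_update() now derives a_rwnd from the actual occupancy of the receive buffer, which makes the per-read increment both redundant and a source of drift. Roughly:

/* Toy model of a recomputed (rather than incrementally tracked)
 * receive window; the real code works from the socket's rcvbuf
 * and rmem accounting. */
static unsigned int rwnd_from_buffer(unsigned int rcvbuf,
				     unsigned int rmem_used)
{
	return rcvbuf > rmem_used ? rcvbuf - rmem_used : 0;
}
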
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 7135e617ab0f..35c8923b5554 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = { | |||
151 | }, | 151 | }, |
152 | { | 152 | { |
153 | .procname = "cookie_hmac_alg", | 153 | .procname = "cookie_hmac_alg", |
154 | .data = &init_net.sctp.sctp_hmac_alg, | ||
154 | .maxlen = 8, | 155 | .maxlen = 8, |
155 | .mode = 0644, | 156 | .mode = 0644, |
156 | .proc_handler = proc_sctp_do_hmac_alg, | 157 | .proc_handler = proc_sctp_do_hmac_alg, |
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, | |||
401 | 402 | ||
402 | int sctp_sysctl_net_register(struct net *net) | 403 | int sctp_sysctl_net_register(struct net *net) |
403 | { | 404 | { |
404 | struct ctl_table *table; | 405 | struct ctl_table *table = sctp_net_table; |
405 | int i; | 406 | |
407 | if (!net_eq(net, &init_net)) { | ||
408 | int i; | ||
406 | 409 | ||
407 | table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); | 410 | table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL); |
408 | if (!table) | 411 | if (!table) |
409 | return -ENOMEM; | 412 | return -ENOMEM; |
410 | 413 | ||
411 | for (i = 0; table[i].data; i++) | 414 | for (i = 0; table[i].data; i++) |
412 | table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; | 415 | table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; |
416 | } | ||
413 | 417 | ||
414 | net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); | 418 | net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); |
415 | return 0; | 419 | return 0; |
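
Two related fixes here. The cookie_hmac_alg entry was missing its .data pointer, and because the rebasing loop terminates at the first entry with NULL .data, every entry after it kept pointing into init_net for all namespaces; hence the added initializer. The !net_eq() test additionally lets init_net register the static table directly instead of doing a pointless kmemdup() and rebase against itself. A compilable userspace sketch of the rebase-by-offset trick and of why a NULL .data breaks it (names invented for illustration):

#include <stdlib.h>
#include <string.h>

struct sctp_like { int rto_min; char hmac_alg[8]; };

static struct sctp_like init_ns;    /* plays init_net.sctp */

struct entry { const char *procname; void *data; };

static struct entry template_table[] = {
	{ "rto_min",         &init_ns.rto_min },
	{ "cookie_hmac_alg",  init_ns.hmac_alg }, /* must not be NULL */
	{ NULL, NULL },                            /* terminator */
};

static struct entry *table_for(struct sctp_like *ns)
{
	struct entry *t;
	int i;

	if (ns == &init_ns)
		return template_table;  /* init ns: use template as-is */

	t = malloc(sizeof(template_table));
	if (!t)
		return NULL;
	memcpy(t, template_table, sizeof(template_table));

	/* rebase every .data pointer from init_ns into this namespace;
	 * an entry with NULL .data stops this loop early, which is
	 * precisely the bug the missing cookie_hmac_alg .data caused */
	for (i = 0; t[i].data; i++)
		t[i].data = (char *)t[i].data +
			    ((char *)ns - (char *)&init_ns);
	return t;
}
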
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 85c64658bd0b..8d198ae03606 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | |||
989 | skb = sctp_event2skb(event); | 989 | skb = sctp_event2skb(event); |
990 | /* Set the owner and charge rwnd for bytes received. */ | 990 | /* Set the owner and charge rwnd for bytes received. */ |
991 | sctp_ulpevent_set_owner(event, asoc); | 991 | sctp_ulpevent_set_owner(event, asoc); |
992 | sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); | 992 | sctp_assoc_rwnd_update(asoc, false); |
993 | 993 | ||
994 | if (!skb->data_len) | 994 | if (!skb->data_len) |
995 | return; | 995 | return; |
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1011 | { | 1011 | { |
1012 | struct sk_buff *skb, *frag; | 1012 | struct sk_buff *skb, *frag; |
1013 | unsigned int len; | 1013 | unsigned int len; |
1014 | struct sctp_association *asoc; | ||
1014 | 1015 | ||
1015 | /* Current stack structures assume that the rcv buffer is | 1016 | /* Current stack structures assume that the rcv buffer is |
1016 | * per socket. For UDP style sockets this is not true as | 1017 | * per socket. For UDP style sockets this is not true as |
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | |||
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | done: | 1038 | done: |
1038 | sctp_assoc_rwnd_increase(event->asoc, len); | 1039 | asoc = event->asoc; |
1040 | sctp_association_hold(asoc); | ||
1039 | sctp_ulpevent_release_owner(event); | 1041 | sctp_ulpevent_release_owner(event); |
1042 | sctp_assoc_rwnd_update(asoc, true); | ||
1043 | sctp_association_put(asoc); | ||
1040 | } | 1044 | } |
1041 | 1045 | ||
1042 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) | 1046 | static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) |
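
The ordering in the done: label matters: sctp_ulpevent_release_owner() drops the skb's reference on the association, which may be the last one, so touching event->asoc afterwards would be a use-after-free. Taking an explicit hold first keeps the association alive across the rwnd update. A compact model of that hold-before-release discipline (refcounting reduced to a bare int purely for illustration):

#include <stdlib.h>

struct assoc {
	int refcnt;
	unsigned int rwnd;
};

static void assoc_hold(struct assoc *a) { a->refcnt++; }

static void assoc_put(struct assoc *a)
{
	if (--a->refcnt == 0)
		free(a);          /* last reference frees the assoc */
}

static void rwnd_update(struct assoc *a)
{
	(void)a;                  /* recompute the window; elided */
}

/* dropping the event's owner reference may be the final put */
static void release_owner(struct assoc *a)
{
	assoc_put(a);
}

static void release_data(struct assoc *a)
{
	assoc_hold(a);            /* pin across the window update */
	release_owner(a);
	rwnd_update(a);           /* safe: we still hold a reference */
	assoc_put(a);
}
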
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 6c0513a7f992..36e431ee1c90 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -108,6 +108,7 @@ struct gss_auth { | |||
108 | static DEFINE_SPINLOCK(pipe_version_lock); | 108 | static DEFINE_SPINLOCK(pipe_version_lock); |
109 | static struct rpc_wait_queue pipe_version_rpc_waitqueue; | 109 | static struct rpc_wait_queue pipe_version_rpc_waitqueue; |
110 | static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); | 110 | static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); |
111 | static void gss_put_auth(struct gss_auth *gss_auth); | ||
111 | 112 | ||
112 | static void gss_free_ctx(struct gss_cl_ctx *); | 113 | static void gss_free_ctx(struct gss_cl_ctx *); |
113 | static const struct rpc_pipe_ops gss_upcall_ops_v0; | 114 | static const struct rpc_pipe_ops gss_upcall_ops_v0; |
@@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) | |||
320 | if (gss_msg->ctx != NULL) | 321 | if (gss_msg->ctx != NULL) |
321 | gss_put_ctx(gss_msg->ctx); | 322 | gss_put_ctx(gss_msg->ctx); |
322 | rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue); | 323 | rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue); |
324 | gss_put_auth(gss_msg->auth); | ||
323 | kfree(gss_msg); | 325 | kfree(gss_msg); |
324 | } | 326 | } |
325 | 327 | ||
@@ -498,9 +500,12 @@ gss_alloc_msg(struct gss_auth *gss_auth, | |||
498 | default: | 500 | default: |
499 | err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); | 501 | err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name); |
500 | if (err) | 502 | if (err) |
501 | goto err_free_msg; | 503 | goto err_put_pipe_version; |
502 | }; | 504 | }; |
505 | kref_get(&gss_auth->kref); | ||
503 | return gss_msg; | 506 | return gss_msg; |
507 | err_put_pipe_version: | ||
508 | put_pipe_version(gss_auth->net); | ||
504 | err_free_msg: | 509 | err_free_msg: |
505 | kfree(gss_msg); | 510 | kfree(gss_msg); |
506 | err: | 511 | err: |
@@ -991,6 +996,8 @@ gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt) | |||
991 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); | 996 | gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor); |
992 | if (gss_auth->service == 0) | 997 | if (gss_auth->service == 0) |
993 | goto err_put_mech; | 998 | goto err_put_mech; |
999 | if (!gssd_running(gss_auth->net)) | ||
1000 | goto err_put_mech; | ||
994 | auth = &gss_auth->rpc_auth; | 1001 | auth = &gss_auth->rpc_auth; |
995 | auth->au_cslack = GSS_CRED_SLACK >> 2; | 1002 | auth->au_cslack = GSS_CRED_SLACK >> 2; |
996 | auth->au_rslack = GSS_VERF_SLACK >> 2; | 1003 | auth->au_rslack = GSS_VERF_SLACK >> 2; |
@@ -1062,6 +1069,12 @@ gss_free_callback(struct kref *kref) | |||
1062 | } | 1069 | } |
1063 | 1070 | ||
1064 | static void | 1071 | static void |
1072 | gss_put_auth(struct gss_auth *gss_auth) | ||
1073 | { | ||
1074 | kref_put(&gss_auth->kref, gss_free_callback); | ||
1075 | } | ||
1076 | |||
1077 | static void | ||
1065 | gss_destroy(struct rpc_auth *auth) | 1078 | gss_destroy(struct rpc_auth *auth) |
1066 | { | 1079 | { |
1067 | struct gss_auth *gss_auth = container_of(auth, | 1080 | struct gss_auth *gss_auth = container_of(auth, |
@@ -1082,7 +1095,7 @@ gss_destroy(struct rpc_auth *auth) | |||
1082 | gss_auth->gss_pipe[1] = NULL; | 1095 | gss_auth->gss_pipe[1] = NULL; |
1083 | rpcauth_destroy_credcache(auth); | 1096 | rpcauth_destroy_credcache(auth); |
1084 | 1097 | ||
1085 | kref_put(&gss_auth->kref, gss_free_callback); | 1098 | gss_put_auth(gss_auth); |
1086 | } | 1099 | } |
1087 | 1100 | ||
1088 | /* | 1101 | /* |
@@ -1253,7 +1266,7 @@ gss_destroy_nullcred(struct rpc_cred *cred) | |||
1253 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); | 1266 | call_rcu(&cred->cr_rcu, gss_free_cred_callback); |
1254 | if (ctx) | 1267 | if (ctx) |
1255 | gss_put_ctx(ctx); | 1268 | gss_put_ctx(ctx); |
1256 | kref_put(&gss_auth->kref, gss_free_callback); | 1269 | gss_put_auth(gss_auth); |
1257 | } | 1270 | } |
1258 | 1271 | ||
1259 | static void | 1272 | static void |
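
The auth_gss hunks are one refcounting fix told in several pieces: every gss_upcall_msg now takes a kref on its gss_auth at allocation time (with a matching error path that unwinds only the pipe-version get), and drops it in gss_release_msg() through the new gss_put_auth() wrapper, so the auth can no longer be freed while an upcall message still points at it. Reduced to a userspace sketch (names loosely follow the patch; the refcount is a plain int):

#include <stdlib.h>

struct auth { int kref; };

static void auth_get(struct auth *a) { a->kref++; }

static void auth_put(struct auth *a)       /* plays gss_put_auth() */
{
	if (--a->kref == 0)
		free(a);
}

struct upcall_msg { struct auth *auth; };

static struct upcall_msg *msg_alloc(struct auth *a)
{
	struct upcall_msg *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	m->auth = a;
	auth_get(a);         /* the message pins the auth, as in the patch */
	return m;
}

static void msg_release(struct upcall_msg *m)
{
	auth_put(m->auth);   /* mirrors gss_put_auth() in gss_release_msg() */
	free(m);
}
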
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 890a29912d5a..e860d4f7ed2a 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c | |||
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req) | |||
64 | free_page((unsigned long)xbufp->head[0].iov_base); | 64 | free_page((unsigned long)xbufp->head[0].iov_base); |
65 | xbufp = &req->rq_snd_buf; | 65 | xbufp = &req->rq_snd_buf; |
66 | free_page((unsigned long)xbufp->head[0].iov_base); | 66 | free_page((unsigned long)xbufp->head[0].iov_base); |
67 | list_del(&req->rq_bc_pa_list); | ||
68 | kfree(req); | 67 | kfree(req); |
69 | } | 68 | } |
70 | 69 | ||
@@ -168,8 +167,10 @@ out_free: | |||
168 | /* | 167 | /* |
169 | * Memory allocation failed, free the temporary list | 168 | * Memory allocation failed, free the temporary list |
170 | */ | 169 | */ |
171 | list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) | 170 | list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) { |
171 | list_del(&req->rq_bc_pa_list); | ||
172 | xprt_free_allocation(req); | 172 | xprt_free_allocation(req); |
173 | } | ||
173 | 174 | ||
174 | dprintk("RPC: setup backchannel transport failed\n"); | 175 | dprintk("RPC: setup backchannel transport failed\n"); |
175 | return -ENOMEM; | 176 | return -ENOMEM; |
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs) | |||
198 | xprt_dec_alloc_count(xprt, max_reqs); | 199 | xprt_dec_alloc_count(xprt, max_reqs); |
199 | list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { | 200 | list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { |
200 | dprintk("RPC: req=%p\n", req); | 201 | dprintk("RPC: req=%p\n", req); |
202 | list_del(&req->rq_bc_pa_list); | ||
201 | xprt_free_allocation(req); | 203 | xprt_free_allocation(req); |
202 | if (--max_reqs == 0) | 204 | if (--max_reqs == 0) |
203 | break; | 205 | break; |
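
The backchannel fix moves list unlinking out of the free helper: after the patch, xprt_free_allocation() only frees, and each caller performs the list_del() inside its own list_for_each_entry_safe() walk. That way a request is removed from whichever list it happens to be on at the call site, and the helper can presumably also be used on requests that were never linked at all. The general rule, in a toy singly-linked list (no kernel list.h here):

#include <stdlib.h>

struct req { struct req *next; };

static void req_free(struct req *r)
{
	free(r);                  /* no list manipulation in the helper */
}

static void destroy_all(struct req **head)
{
	struct req *r, *next;

	for (r = *head; r; r = next) {
		next = r->next;   /* safe walk: grab successor first */
		req_free(r);      /* unlinking is the caller's job... */
	}
	*head = NULL;             /* ...done here for the whole list */
}
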
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 817a1e523969..0addefca8e77 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -510,6 +510,7 @@ static int xs_nospace(struct rpc_task *task) | |||
510 | struct rpc_rqst *req = task->tk_rqstp; | 510 | struct rpc_rqst *req = task->tk_rqstp; |
511 | struct rpc_xprt *xprt = req->rq_xprt; | 511 | struct rpc_xprt *xprt = req->rq_xprt; |
512 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | 512 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
513 | struct sock *sk = transport->inet; | ||
513 | int ret = -EAGAIN; | 514 | int ret = -EAGAIN; |
514 | 515 | ||
515 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", | 516 | dprintk("RPC: %5u xmit incomplete (%u left of %u)\n", |
@@ -527,7 +528,7 @@ static int xs_nospace(struct rpc_task *task) | |||
527 | * window size | 528 | * window size |
528 | */ | 529 | */ |
529 | set_bit(SOCK_NOSPACE, &transport->sock->flags); | 530 | set_bit(SOCK_NOSPACE, &transport->sock->flags); |
530 | transport->inet->sk_write_pending++; | 531 | sk->sk_write_pending++; |
531 | /* ...and wait for more buffer space */ | 532 | /* ...and wait for more buffer space */ |
532 | xprt_wait_for_buffer_space(task, xs_nospace_callback); | 533 | xprt_wait_for_buffer_space(task, xs_nospace_callback); |
533 | } | 534 | } |
@@ -537,6 +538,9 @@ static int xs_nospace(struct rpc_task *task) | |||
537 | } | 538 | } |
538 | 539 | ||
539 | spin_unlock_bh(&xprt->transport_lock); | 540 | spin_unlock_bh(&xprt->transport_lock); |
541 | |||
542 | /* Race breaker in case memory is freed before above code is called */ | ||
543 | sk->sk_write_space(sk); | ||
540 | return ret; | 544 | return ret; |
541 | } | 545 | } |
542 | 546 | ||
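
The xs_nospace() change is a classic lost-wakeup race breaker: write buffer memory can be freed (and sk_write_space() fire) in the window before the task is queued waiting for buffer space, in which case the wakeup finds no sleeper. Re-invoking sk->sk_write_space(sk) once after dropping transport_lock re-drives the notification for that case. A pthread analogue of the shape (note that a condition variable's predicate re-check already prevents the lost wakeup on its own; the extra call below exists only to mirror the kernel fix, where the sleeper is queued before the final check):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int free_bytes;                /* ground truth, guarded by lock */

static void write_space(void)         /* plays sk->sk_write_space */
{
	pthread_mutex_lock(&lock);
	if (free_bytes > 0)
		pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void mem_freed(int n)          /* plays the skb destructor */
{
	pthread_mutex_lock(&lock);
	free_bytes += n;
	pthread_mutex_unlock(&lock);
	write_space();
}

static void wait_for_space(void)      /* plays xs_nospace */
{
	/* race breaker: a mem_freed() that completed before we got
	 * here woke nobody; re-run the wakeup path once, as the patch
	 * does after dropping transport_lock */
	write_space();

	pthread_mutex_lock(&lock);
	while (free_bytes == 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}
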
diff --git a/net/tipc/core.h b/net/tipc/core.h index 1ff477b0450d..5569d96b4da3 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h | |||
@@ -192,6 +192,7 @@ static inline void k_term_timer(struct timer_list *timer) | |||
192 | 192 | ||
193 | struct tipc_skb_cb { | 193 | struct tipc_skb_cb { |
194 | void *handle; | 194 | void *handle; |
195 | bool deferred; | ||
195 | }; | 196 | }; |
196 | 197 | ||
197 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) | 198 | #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) |
diff --git a/net/tipc/link.c b/net/tipc/link.c index d4b5de41b682..da6018beb6eb 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1391,6 +1391,12 @@ static int link_recv_buf_validate(struct sk_buff *buf) | |||
1391 | u32 hdr_size; | 1391 | u32 hdr_size; |
1392 | u32 min_hdr_size; | 1392 | u32 min_hdr_size; |
1393 | 1393 | ||
1394 | /* If this packet comes from the defer queue, the skb has already | ||
1395 | * been validated | ||
1396 | */ | ||
1397 | if (unlikely(TIPC_SKB_CB(buf)->deferred)) | ||
1398 | return 1; | ||
1399 | |||
1394 | if (unlikely(buf->len < MIN_H_SIZE)) | 1400 | if (unlikely(buf->len < MIN_H_SIZE)) |
1395 | return 0; | 1401 | return 0; |
1396 | 1402 | ||
@@ -1703,6 +1709,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
1703 | &l_ptr->newest_deferred_in, buf)) { | 1709 | &l_ptr->newest_deferred_in, buf)) { |
1704 | l_ptr->deferred_inqueue_sz++; | 1710 | l_ptr->deferred_inqueue_sz++; |
1705 | l_ptr->stats.deferred_recv++; | 1711 | l_ptr->stats.deferred_recv++; |
1712 | TIPC_SKB_CB(buf)->deferred = true; | ||
1706 | if ((l_ptr->deferred_inqueue_sz % 16) == 1) | 1713 | if ((l_ptr->deferred_inqueue_sz % 16) == 1) |
1707 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 1714 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); |
1708 | } else | 1715 | } else |
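
The TIPC pair of hunks uses skb->cb scratch space to remember that a buffer already passed link_recv_buf_validate() before it was placed on the deferred queue, so redelivery from that queue skips a second validation pass, per the comment the patch itself adds. The pattern in miniature (sizes and names are illustrative; the real cb area belongs to whichever layer currently owns the skb and is zeroed on allocation, which is what makes the flag's initial false state reliable):

#include <stdbool.h>

#define CB_SIZE 48                    /* skb->cb is 48 bytes */

struct buf {
	unsigned char cb[CB_SIZE];    /* per-layer scratch space */
};

struct tipc_cb_like {
	void *handle;
	bool deferred;
};

#define BUF_CB(b) ((struct tipc_cb_like *)&(b)->cb[0])

static int validate(struct buf *b)
{
	if (BUF_CB(b)->deferred)      /* validated before it was deferred */
		return 1;
	/* full header/length checks would run here */
	return 1;
}

static void defer(struct buf *b)
{
	BUF_CB(b)->deferred = true;   /* remember: don't validate again */
	/* ...buffer is then queued on the deferred-input list... */
}
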