Diffstat (limited to 'net')
75 files changed, 724 insertions, 516 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896ebfcf5..47c908f1f626 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
 	.parse = eth_header_parse,
 };
 
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+				     unsigned short type,
+				     const void *daddr, const void *saddr,
+				     unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+	.create = vlan_passthru_hard_header,
+	.rebuild = dev_rebuild_header,
+	.parse = eth_header_parse,
+};
+
 static struct device_type vlan_type = {
 	.name = "vlan",
 };
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
 
 	dev->needed_headroom = real_dev->needed_headroom;
 	if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-		dev->header_ops      = real_dev->header_ops;
+		dev->header_ops      = &vlan_passthru_header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
 	} else {
 		dev->header_ops      = &vlan_header_ops;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a90872..b9c8a6eedf45 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 	hard_iface->bat_iv.ogm_buff = ogm_buff;
 
 	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
-	batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
-	batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
-	batadv_ogm_packet->header.ttl = 2;
+	batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+	batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+	batadv_ogm_packet->ttl = 2;
 	batadv_ogm_packet->flags = BATADV_NO_FLAGS;
 	batadv_ogm_packet->reserved = 0;
 	batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 
 	batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
 	batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
-	batadv_ogm_packet->header.ttl = BATADV_TTL;
+	batadv_ogm_packet->ttl = BATADV_TTL;
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 		   fwd_str, (packet_num > 0 ? "aggregated " : ""),
 		   batadv_ogm_packet->orig,
 		   ntohl(batadv_ogm_packet->seqno),
-		   batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+		   batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
 		   (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
 		    "on" : "off"),
 		   hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 		/* multihomed peer assumed
 		 * non-primary OGMs are only broadcasted on their interface
 		 */
-		if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+		if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
 		    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
 			/* FIXME: what about aggregated packets ? */
 			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 				   (forw_packet->own ? "Sending own" : "Forwarding"),
 				   batadv_ogm_packet->orig,
 				   ntohl(batadv_ogm_packet->seqno),
-				   batadv_ogm_packet->header.ttl,
+				   batadv_ogm_packet->ttl,
 				   forw_packet->if_incoming->net_dev->name,
 				   forw_packet->if_incoming->net_dev->dev_addr);
 
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
 		 */
 		if ((!directlink) &&
 		    (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
-		    (batadv_ogm_packet->header.ttl != 1) &&
+		    (batadv_ogm_packet->ttl != 1) &&
 
 		    /* own packets originating non-primary
 		     * interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
 		 * interface only - we still can aggregate
 		 */
 		if ((directlink) &&
-		    (new_bat_ogm_packet->header.ttl == 1) &&
+		    (new_bat_ogm_packet->ttl == 1) &&
 		    (forw_packet->if_incoming == if_incoming) &&
 
 		    /* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	uint16_t tvlv_len;
 
-	if (batadv_ogm_packet->header.ttl <= 1) {
+	if (batadv_ogm_packet->ttl <= 1) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
 		return;
 	}
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
 	tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
-	batadv_ogm_packet->header.ttl--;
+	batadv_ogm_packet->ttl--;
 	memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
 
 	/* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Forwarding packet: tq: %i, ttl: %i\n",
-		   batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+		   batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
 
 	/* switch of primaries first hop flag when forwarding */
 	batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 	spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
 	if (dup_status == BATADV_NO_DUP) {
-		orig_node->last_ttl = batadv_ogm_packet->header.ttl;
-		neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+		orig_node->last_ttl = batadv_ogm_packet->ttl;
+		neigh_node->last_ttl = batadv_ogm_packet->ttl;
 	}
 
 	batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 	 * packet in an aggregation. Here we expect that the padding
 	 * is always zero (or not 0x01)
 	 */
-	if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+	if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
 		return;
 
 	/* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 		   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
 		   batadv_ogm_packet->prev_sender,
 		   ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
-		   batadv_ogm_packet->header.ttl,
-		   batadv_ogm_packet->header.version, has_directlink_flag);
+		   batadv_ogm_packet->ttl,
+		   batadv_ogm_packet->version, has_directlink_flag);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 	 * seqno and similar ttl as the non-duplicate
 	 */
 	sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
-	similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+	similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
 	if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
 			    (sameseq && similar_ttl)))
 		batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c3934bd7b..b316a4cb6f14 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
 		unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-		switch (unicast_4addr_packet->u.header.packet_type) {
+		switch (unicast_4addr_packet->u.packet_type) {
 		case BATADV_UNICAST:
 			batadv_dbg(BATADV_DBG_DAT, bat_priv,
 				   "* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			break;
 		default:
 			batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
-				   unicast_4addr_packet->u.header.packet_type);
+				   unicast_4addr_packet->u.packet_type);
 		}
 		break;
 	case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 	default:
 		batadv_dbg(BATADV_DBG_DAT, bat_priv,
 			   "* encapsulated within an unknown packet type (0x%x)\n",
-			   unicast_4addr_packet->u.header.packet_type);
+			   unicast_4addr_packet->u.packet_type);
 	}
 }
 
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321b3a04..6ddb6145ffb5 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
 	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
 			   skb->len + ETH_HLEN);
 
-	packet->header.ttl--;
+	packet->ttl--;
 	batadv_send_skb_packet(skb, neigh_node->if_incoming,
 			       neigh_node->addr);
 	ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
 		goto out_err;
 
 	/* Create one header to be copied to all fragments */
-	frag_header.header.packet_type = BATADV_UNICAST_FRAG;
-	frag_header.header.version = BATADV_COMPAT_VERSION;
-	frag_header.header.ttl = BATADV_TTL;
+	frag_header.packet_type = BATADV_UNICAST_FRAG;
+	frag_header.version = BATADV_COMPAT_VERSION;
+	frag_header.ttl = BATADV_TTL;
 	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
 	frag_header.reserved = 0;
 	frag_header.no = 0;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4efe3543..130cc3217e2b 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 		goto free_skb;
 	}
 
-	if (icmp_header->header.packet_type != BATADV_ICMP) {
+	if (icmp_header->packet_type != BATADV_ICMP) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
 		len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 
 	icmp_header->uid = socket_client->index;
 
-	if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+	if (icmp_header->version != BATADV_COMPAT_VERSION) {
 		icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
-		icmp_header->header.version = BATADV_COMPAT_VERSION;
+		icmp_header->version = BATADV_COMPAT_VERSION;
 		batadv_socket_add_packet(socket_client, icmp_header,
 					 packet_len);
 		goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e568f0a..faba0f61ad53 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -277,7 +277,7 @@ int batadv_max_header_len(void)
 			   sizeof(struct batadv_coded_packet));
 #endif
 
-	return header_len;
+	return header_len + ETH_HLEN;
 }
 
 /**
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 
 	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
 
-	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Drop packet: incompatible batman version (%i)\n",
-			   batadv_ogm_packet->header.version);
+			   batadv_ogm_packet->version);
 		goto err_free;
 	}
 
 	/* all receive handlers return whether they received or reused
 	 * the supplied skb. if not, we have to free the skb.
 	 */
-	idx = batadv_ogm_packet->header.packet_type;
+	idx = batadv_ogm_packet->packet_type;
 	ret = (*batadv_rx_handler[idx])(skb, hard_iface);
 
 	if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
 	BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
 	BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
 	BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
-	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
-	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+	BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
 
 	/* broadcast packet */
 	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
 	skb_reserve(skb, ETH_HLEN);
 	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
 	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
-	unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
-	unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
-	unicast_tvlv_packet->header.ttl = BATADV_TTL;
+	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+	unicast_tvlv_packet->ttl = BATADV_TTL;
 	unicast_tvlv_packet->reserved = 0;
 	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
 	unicast_tvlv_packet->align = 0;
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199bc0af..511d7e1eea38 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 {
 	if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
 		return false;
-	if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+	if (orig_node->last_ttl != ogm_packet->ttl + 1)
 		return false;
 	if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
 		return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
 	coded_packet = (struct batadv_coded_packet *)skb_dest->data;
 	skb_reset_mac_header(skb_dest);
 
-	coded_packet->header.packet_type = BATADV_CODED;
-	coded_packet->header.version = BATADV_COMPAT_VERSION;
-	coded_packet->header.ttl = packet1->header.ttl;
+	coded_packet->packet_type = BATADV_CODED;
+	coded_packet->version = BATADV_COMPAT_VERSION;
+	coded_packet->ttl = packet1->ttl;
 
 	/* Info about first unicast packet */
 	memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
 	memcpy(coded_packet->second_source, second_source, ETH_ALEN);
 	memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
 	coded_packet->second_crc = packet_id2;
-	coded_packet->second_ttl = packet2->header.ttl;
+	coded_packet->second_ttl = packet2->ttl;
 	coded_packet->second_ttvn = packet2->ttvn;
 	coded_packet->coded_len = htons(coding_len);
 
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
 	/* We only handle unicast packets */
 	payload = skb_network_header(skb);
 	packet = (struct batadv_unicast_packet *)payload;
-	if (packet->header.packet_type != BATADV_UNICAST)
+	if (packet->packet_type != BATADV_UNICAST)
 		goto out;
 
 	/* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
 	/* Check for supported packet type */
 	payload = skb_network_header(skb);
 	packet = (struct batadv_unicast_packet *)payload;
-	if (packet->header.packet_type != BATADV_UNICAST)
+	if (packet->packet_type != BATADV_UNICAST)
 		goto out;
 
 	/* Find existing nc_path or create a new */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 		ttvn = coded_packet_tmp.second_ttvn;
 	} else {
 		orig_dest = coded_packet_tmp.first_orig_dest;
-		ttl = coded_packet_tmp.header.ttl;
+		ttl = coded_packet_tmp.ttl;
 		ttvn = coded_packet_tmp.first_ttvn;
 	}
 
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
 	/* Create decoded unicast packet */
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	unicast_packet->header.packet_type = BATADV_UNICAST;
-	unicast_packet->header.version = BATADV_COMPAT_VERSION;
-	unicast_packet->header.ttl = ttl;
+	unicast_packet->packet_type = BATADV_UNICAST;
+	unicast_packet->version = BATADV_COMPAT_VERSION;
+	unicast_packet->ttl = ttl;
 	memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
 	unicast_packet->ttvn = ttvn;
 
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b62966..2dd8f2422550 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
 	BATADV_TVLV_ROAM = 0x05,
 };
 
+#pragma pack(2)
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
 	uint8_t type;		/* bla_claimframe */
 	__be16 group;		/* group id */
 };
-
-struct batadv_header {
-	uint8_t packet_type;
-	uint8_t version;  /* batman version field */
-	uint8_t ttl;
-	/* the parent struct has to add a byte after the header to make
-	 * everything 4 bytes aligned again
-	 */
-};
+#pragma pack()
 
 /**
  * struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
 * @flags: contains routing relevant flags - see enum batadv_iv_flags
 * @tvlv_len: length of tvlv data following the ogm header
 */
 struct batadv_ogm_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;
+	uint8_t ttl;
 	uint8_t flags;
 	__be32 seqno;
 	uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
 /**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
 * @msg_type: ICMP packet type
 * @dst: address of the destination node
 * @orig: address of the source node
 * @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packets parsing only and it is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
 */
 struct batadv_icmp_header {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;
+	uint8_t ttl;
 	uint8_t msg_type; /* see ICMP message types above */
 	uint8_t dst[ETH_ALEN];
 	uint8_t orig[ETH_ALEN];
 	uint8_t uid;
+	uint8_t align[3];
 };
 
 /**
 * batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
 * @reserved: not used - useful for alignment
 * @seqno: ICMP sequence number
 */
 struct batadv_icmp_packet {
-	struct batadv_icmp_header icmph;
+	uint8_t packet_type;
+	uint8_t version;
+	uint8_t ttl;
+	uint8_t msg_type; /* see ICMP message types above */
+	uint8_t dst[ETH_ALEN];
+	uint8_t orig[ETH_ALEN];
+	uint8_t uid;
 	uint8_t reserved;
 	__be16 seqno;
 };
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
 
 /**
 * batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
 * @rr_cur: number of entries the rr array
 * @seqno: ICMP sequence number
 * @rr: route record array
 */
 struct batadv_icmp_packet_rr {
-	struct batadv_icmp_header icmph;
+	uint8_t packet_type;
+	uint8_t version;
+	uint8_t ttl;
+	uint8_t msg_type; /* see ICMP message types above */
+	uint8_t dst[ETH_ALEN];
+	uint8_t orig[ETH_ALEN];
+	uint8_t uid;
 	uint8_t rr_cur;
 	__be16 seqno;
 	uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
 */
 #pragma pack(2)
 
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
 struct batadv_unicast_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;
+	uint8_t ttl;
 	uint8_t ttvn; /* destination translation table version number */
 	uint8_t dest[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
 
 /**
 * struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
 * @dest: final destination used when routing fragments
 * @orig: originator of the fragment used when merging the packet
 * @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
 * @total_size: size of the merged packet
 */
 struct batadv_frag_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;  /* batman version field */
+	uint8_t ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
 	uint8_t no:4;
 	uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
 	__be16 total_size;
 };
 
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
 struct batadv_bcast_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;  /* batman version field */
+	uint8_t ttl;
 	uint8_t reserved;
 	__be32 seqno;
 	uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
 	 */
 };
 
-#pragma pack()
-
 /**
 * struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
 * @reserved: Align following fields to 2-byte boundaries
 * @first_source: original source of first included packet
 * @first_orig_dest: original destinal of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
 * @coded_len: length of network coded part of the payload
 */
 struct batadv_coded_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;  /* batman version field */
+	uint8_t ttl;
 	uint8_t first_ttvn;
 	/* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
 	uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
 	__be16 coded_len;
 };
 
+#pragma pack()
+
 /**
 * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the genereal header
+ * @ttl: time to live for this packet, part of the genereal header
 * @reserved: reserved field (for packet alignment)
 * @src: address of the source
 * @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
 * @align: 2 bytes to align the header to a 4 byte boundry
 */
 struct batadv_unicast_tvlv_packet {
-	struct batadv_header header;
+	uint8_t packet_type;
+	uint8_t version;  /* batman version field */
+	uint8_t ttl;
 	uint8_t reserved;
 	uint8_t dst[ETH_ALEN];
 	uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
 * struct batadv_tvlv_tt_change - translation table diff data
 * @flags: status indicators concerning the non-mesh client (see
 *  batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
 * @addr: mac address of non-mesh client that triggered this tt change
 * @vid: VLAN identifier
 */
 struct batadv_tvlv_tt_change {
 	uint8_t flags;
-	uint8_t reserved;
+	uint8_t reserved[3];
 	uint8_t addr[ETH_ALEN];
 	__be16 vid;
 };
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d775ad6..46278bfb8fdb 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
 		memcpy(icmph->dst, icmph->orig, ETH_ALEN);
 		memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 		icmph->msg_type = BATADV_ECHO_REPLY;
-		icmph->header.ttl = BATADV_TTL;
+		icmph->ttl = BATADV_TTL;
 
 		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
 		if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
 	/* send TTL exceeded if packet is an echo request (traceroute) */
-	if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+	if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
 		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-			 icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+			 icmp_packet->orig, icmp_packet->dst);
 		goto out;
 	}
 
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 		goto out;
 
 	/* get routing information */
-	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
 	if (!orig_node)
 		goto out;
 
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
 	icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-	memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
-	memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
 	       ETH_ALEN);
-	icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
-	icmp_packet->icmph.header.ttl = BATADV_TTL;
+	icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+	icmp_packet->ttl = BATADV_TTL;
 
 	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
 		ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
 		return batadv_recv_my_icmp_packet(bat_priv, skb);
 
 	/* TTL exceeded */
-	if (icmph->header.ttl < 2)
+	if (icmph->ttl < 2)
 		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
 	/* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
 	icmph = (struct batadv_icmp_header *)skb->data;
 
 	/* decrement ttl */
-	icmph->header.ttl--;
+	icmph->ttl--;
 
 	/* route it */
 	if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* TTL exceeded */
-	if (unicast_packet->header.ttl < 2) {
+	if (unicast_packet->ttl < 2) {
 		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
 			 ethhdr->h_source, unicast_packet->dest);
 		goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 
 	/* decrement ttl */
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	unicast_packet->header.ttl--;
+	unicast_packet->ttl--;
 
-	switch (unicast_packet->header.packet_type) {
+	switch (unicast_packet->packet_type) {
 	case BATADV_UNICAST_4ADDR:
 		hdr_len = sizeof(struct batadv_unicast_4addr_packet);
 		break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
 
-	is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
 	/* the caller function should have already pulled 2 bytes */
 	if (is4addr)
 		hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
 	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
 		goto out;
 
-	if (bcast_packet->header.ttl < 2)
+	if (bcast_packet->ttl < 2)
 		goto out;
 
 	orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5ebaa28..fba4dcfcfac2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
 		return false;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
-	unicast_packet->header.version = BATADV_COMPAT_VERSION;
+	unicast_packet->version = BATADV_COMPAT_VERSION;
 	/* batman packet type: unicast */
-	unicast_packet->header.packet_type = BATADV_UNICAST;
+	unicast_packet->packet_type = BATADV_UNICAST;
 	/* set unicast ttl */
-	unicast_packet->header.ttl = BATADV_TTL;
+	unicast_packet->ttl = BATADV_TTL;
 	/* copy the destination for faster routing */
 	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
 	/* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
 		goto out;
 
 	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
 	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
 	uc_4addr_packet->subtype = packet_subtype;
 	uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 
 	/* as we have a copy now, it is safe to decrease the TTL */
 	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
-	bcast_packet->header.ttl--;
+	bcast_packet->ttl--;
 
 	skb_reset_mac_header(newskb);
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f050876f82..a8f99d1486c0 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
 			goto dropped;
 
 		bcast_packet = (struct batadv_bcast_packet *)skb->data;
-		bcast_packet->header.version = BATADV_COMPAT_VERSION;
-		bcast_packet->header.ttl = BATADV_TTL;
+		bcast_packet->version = BATADV_COMPAT_VERSION;
+		bcast_packet->ttl = BATADV_TTL;
 
 		/* batman packet type: broadcast */
-		bcast_packet->header.packet_type = BATADV_BCAST;
+		bcast_packet->packet_type = BATADV_BCAST;
 		bcast_packet->reserved = 0;
 
 		/* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
 			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
 			 int hdr_size, struct batadv_orig_node *orig_node)
 {
-	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+	struct batadv_bcast_packet *batadv_bcast_packet;
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	__be16 ethertype = htons(ETH_P_BATMAN);
 	struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	unsigned short vid;
 	bool is_bcast;
 
-	is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+	is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
 	skb_pull_rcsum(skb, hdr_size);
 	skb_reset_mac_header(skb);
 
-	vid = batadv_get_vid(skb, hdr_size);
+	/* clean the netfilter state now that the batman-adv header has been
+	 * removed
+	 */
+	nf_reset(skb);
+
+	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
 	switch (ntohs(ethhdr->h_proto)) {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4add57d4857f..ff625fedbc5e 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 		return;
 
 	tt_change_node->change.flags = flags;
-	tt_change_node->change.reserved = 0;
+	memset(tt_change_node->change.reserved, 0,
+	       sizeof(tt_change_node->change.reserved));
 	memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
 	tt_change_node->change.vid = htons(common->vid);
 
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 			       ETH_ALEN);
 			tt_change->flags = tt_common_entry->flags;
 			tt_change->vid = htons(tt_common_entry->vid);
-			tt_change->reserved = 0;
+			memset(tt_change->reserved, 0,
+			       sizeof(tt_change->reserved));
 
 			tt_num_entries++;
 			tt_change++;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 6a6c8bb4fd72..7552f9e3089c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); | 940 | bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); |
941 | skb_pull(skb, 1); | 941 | skb_pull(skb, 1); |
942 | 942 | ||
943 | if (hci_pi(sk)->channel == HCI_CHANNEL_RAW && | 943 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { |
944 | bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | 944 | /* No permission check is needed for user channel |
945 | * since that gets enforced when binding the socket. | ||
946 | * | ||
947 | * However check that the packet type is valid. | ||
948 | */ | ||
949 | if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
950 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
951 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
952 | err = -EINVAL; | ||
953 | goto drop; | ||
954 | } | ||
955 | |||
956 | skb_queue_tail(&hdev->raw_q, skb); | ||
957 | queue_work(hdev->workqueue, &hdev->tx_work); | ||
958 | } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { | ||
945 | u16 opcode = get_unaligned_le16(skb->data); | 959 | u16 opcode = get_unaligned_le16(skb->data); |
946 | u16 ogf = hci_opcode_ogf(opcode); | 960 | u16 ogf = hci_opcode_ogf(opcode); |
947 | u16 ocf = hci_opcode_ocf(opcode); | 961 | u16 ocf = hci_opcode_ocf(opcode); |
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
972 | goto drop; | 986 | goto drop; |
973 | } | 987 | } |
974 | 988 | ||
975 | if (hci_pi(sk)->channel == HCI_CHANNEL_USER && | ||
976 | bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && | ||
977 | bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && | ||
978 | bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { | ||
979 | err = -EINVAL; | ||
980 | goto drop; | ||
981 | } | ||
982 | |||
983 | skb_queue_tail(&hdev->raw_q, skb); | 989 | skb_queue_tail(&hdev->raw_q, skb); |
984 | queue_work(hdev->workqueue, &hdev->tx_work); | 990 | queue_work(hdev->workqueue, &hdev->tx_work); |
985 | } | 991 | } |
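Note on the hci_sock hunk: user-channel traffic skips the RAW-channel command filter, but the packet type is still validated against the three known HCI types before the skb is queued. A hedged userspace sketch of the same whitelist-before-queue check; the constants and helper names are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>

enum pkt_type { PKT_COMMAND = 1, PKT_ACL = 2, PKT_SCO = 3 };

/* Accept only the packet types a user-channel sender may inject. */
static bool pkt_type_valid(int type)
{
	return type == PKT_COMMAND || type == PKT_ACL || type == PKT_SCO;
}

static int user_channel_send(int type)
{
	if (!pkt_type_valid(type))
		return -1;                 /* mirrors the -EINVAL + drop path */
	printf("queued packet of type %d\n", type);
	return 0;
}

int main(void)
{
	user_channel_send(PKT_ACL);        /* accepted */
	if (user_channel_send(7))          /* rejected */
		printf("invalid type rejected\n");
	return 0;
}
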
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 4c214b2b88ef..ef66365b7354 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) | |||
1998 | u32 old; | 1998 | u32 old; |
1999 | struct net_bridge_mdb_htable *mdb; | 1999 | struct net_bridge_mdb_htable *mdb; |
2000 | 2000 | ||
2001 | spin_lock(&br->multicast_lock); | 2001 | spin_lock_bh(&br->multicast_lock); |
2002 | if (!netif_running(br->dev)) | 2002 | if (!netif_running(br->dev)) |
2003 | goto unlock; | 2003 | goto unlock; |
2004 | 2004 | ||
@@ -2030,7 +2030,7 @@ rollback: | |||
2030 | } | 2030 | } |
2031 | 2031 | ||
2032 | unlock: | 2032 | unlock: |
2033 | spin_unlock(&br->multicast_lock); | 2033 | spin_unlock_bh(&br->multicast_lock); |
2034 | 2034 | ||
2035 | return err; | 2035 | return err; |
2036 | } | 2036 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index d2b87dbbbb1a..2e0c6a90f6f2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb, | |||
2539 | } | 2539 | } |
2540 | 2540 | ||
2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 2541 | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
2542 | struct netdev_queue *txq, void *accel_priv) | 2542 | struct netdev_queue *txq) |
2543 | { | 2543 | { |
2544 | const struct net_device_ops *ops = dev->netdev_ops; | 2544 | const struct net_device_ops *ops = dev->netdev_ops; |
2545 | int rc = NETDEV_TX_OK; | 2545 | int rc = NETDEV_TX_OK; |
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | |||
2605 | dev_queue_xmit_nit(skb, dev); | 2605 | dev_queue_xmit_nit(skb, dev); |
2606 | 2606 | ||
2607 | skb_len = skb->len; | 2607 | skb_len = skb->len; |
2608 | if (accel_priv) | ||
2609 | rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv); | ||
2610 | else | ||
2611 | rc = ops->ndo_start_xmit(skb, dev); | 2608 | rc = ops->ndo_start_xmit(skb, dev); |
2612 | 2609 | ||
2613 | trace_net_dev_xmit(skb, rc, dev, skb_len); | 2610 | trace_net_dev_xmit(skb, rc, dev, skb_len); |
2614 | if (rc == NETDEV_TX_OK && txq) | 2611 | if (rc == NETDEV_TX_OK) |
2615 | txq_trans_update(txq); | 2612 | txq_trans_update(txq); |
2616 | return rc; | 2613 | return rc; |
2617 | } | 2614 | } |
@@ -2627,10 +2624,7 @@ gso: | |||
2627 | dev_queue_xmit_nit(nskb, dev); | 2624 | dev_queue_xmit_nit(nskb, dev); |
2628 | 2625 | ||
2629 | skb_len = nskb->len; | 2626 | skb_len = nskb->len; |
2630 | if (accel_priv) | 2627 | rc = ops->ndo_start_xmit(nskb, dev); |
2631 | rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv); | ||
2632 | else | ||
2633 | rc = ops->ndo_start_xmit(nskb, dev); | ||
2634 | trace_net_dev_xmit(nskb, rc, dev, skb_len); | 2628 | trace_net_dev_xmit(nskb, rc, dev, skb_len); |
2635 | if (unlikely(rc != NETDEV_TX_OK)) { | 2629 | if (unlikely(rc != NETDEV_TX_OK)) { |
2636 | if (rc & ~NETDEV_TX_MASK) | 2630 | if (rc & ~NETDEV_TX_MASK) |
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit); | |||
2811 | * the BH enable code must have IRQs enabled so that it will not deadlock. | 2805 | * the BH enable code must have IRQs enabled so that it will not deadlock. |
2812 | * --BLG | 2806 | * --BLG |
2813 | */ | 2807 | */ |
2814 | int dev_queue_xmit(struct sk_buff *skb) | 2808 | int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) |
2815 | { | 2809 | { |
2816 | struct net_device *dev = skb->dev; | 2810 | struct net_device *dev = skb->dev; |
2817 | struct netdev_queue *txq; | 2811 | struct netdev_queue *txq; |
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
2827 | 2821 | ||
2828 | skb_update_prio(skb); | 2822 | skb_update_prio(skb); |
2829 | 2823 | ||
2830 | txq = netdev_pick_tx(dev, skb); | 2824 | txq = netdev_pick_tx(dev, skb, accel_priv); |
2831 | q = rcu_dereference_bh(txq->qdisc); | 2825 | q = rcu_dereference_bh(txq->qdisc); |
2832 | 2826 | ||
2833 | #ifdef CONFIG_NET_CLS_ACT | 2827 | #ifdef CONFIG_NET_CLS_ACT |
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb) | |||
2863 | 2857 | ||
2864 | if (!netif_xmit_stopped(txq)) { | 2858 | if (!netif_xmit_stopped(txq)) { |
2865 | __this_cpu_inc(xmit_recursion); | 2859 | __this_cpu_inc(xmit_recursion); |
2866 | rc = dev_hard_start_xmit(skb, dev, txq, NULL); | 2860 | rc = dev_hard_start_xmit(skb, dev, txq); |
2867 | __this_cpu_dec(xmit_recursion); | 2861 | __this_cpu_dec(xmit_recursion); |
2868 | if (dev_xmit_complete(rc)) { | 2862 | if (dev_xmit_complete(rc)) { |
2869 | HARD_TX_UNLOCK(dev, txq); | 2863 | HARD_TX_UNLOCK(dev, txq); |
@@ -2892,8 +2886,19 @@ out: | |||
2892 | rcu_read_unlock_bh(); | 2886 | rcu_read_unlock_bh(); |
2893 | return rc; | 2887 | return rc; |
2894 | } | 2888 | } |
2889 | |||
2890 | int dev_queue_xmit(struct sk_buff *skb) | ||
2891 | { | ||
2892 | return __dev_queue_xmit(skb, NULL); | ||
2893 | } | ||
2895 | EXPORT_SYMBOL(dev_queue_xmit); | 2894 | EXPORT_SYMBOL(dev_queue_xmit); |
2896 | 2895 | ||
2896 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) | ||
2897 | { | ||
2898 | return __dev_queue_xmit(skb, accel_priv); | ||
2899 | } | ||
2900 | EXPORT_SYMBOL(dev_queue_xmit_accel); | ||
2901 | |||
2897 | 2902 | ||
2898 | /*======================================================================= | 2903 | /*======================================================================= |
2899 | Receiver routines | 2904 | Receiver routines |
@@ -4500,7 +4505,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, | |||
4500 | { | 4505 | { |
4501 | struct netdev_adjacent *upper; | 4506 | struct netdev_adjacent *upper; |
4502 | 4507 | ||
4503 | WARN_ON_ONCE(!rcu_read_lock_held()); | 4508 | WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); |
4504 | 4509 | ||
4505 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); | 4510 | upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); |
4506 | 4511 | ||
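Note on the net/core/dev.c hunks: the accel_priv context no longer rides through dev_hard_start_xmit(); instead the old entry point becomes a thin wrapper around a new __dev_queue_xmit() that carries the extra argument, with dev_queue_xmit_accel() exported for callers that need it. A small C sketch of that wrapper refactor; the names here are placeholders, not the kernel API.

#include <stdio.h>

/* The internal worker takes the extra context... */
static int __send(const char *pkt, void *accel_priv)
{
	printf("sending %s (accel=%p)\n", pkt, accel_priv);
	return 0;
}

/* ...while the public entry points stay source-compatible. */
int send_pkt(const char *pkt)
{
	return __send(pkt, NULL);
}

int send_pkt_accel(const char *pkt, void *accel_priv)
{
	return __send(pkt, accel_priv);
}

int main(void)
{
	int ctx = 42;

	send_pkt("plain skb");
	send_pkt_accel("offloaded skb", &ctx);
	return 0;
}
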
diff --git a/net/core/filter.c b/net/core/filter.c index 01b780856db2..ad30d626a5bd 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <asm/unaligned.h> | 37 | #include <asm/unaligned.h> |
38 | #include <linux/filter.h> | 38 | #include <linux/filter.h> |
39 | #include <linux/reciprocal_div.h> | ||
40 | #include <linux/ratelimit.h> | 39 | #include <linux/ratelimit.h> |
41 | #include <linux/seccomp.h> | 40 | #include <linux/seccomp.h> |
42 | #include <linux/if_vlan.h> | 41 | #include <linux/if_vlan.h> |
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb, | |||
166 | A /= X; | 165 | A /= X; |
167 | continue; | 166 | continue; |
168 | case BPF_S_ALU_DIV_K: | 167 | case BPF_S_ALU_DIV_K: |
169 | A = reciprocal_divide(A, K); | 168 | A /= K; |
170 | continue; | 169 | continue; |
171 | case BPF_S_ALU_MOD_X: | 170 | case BPF_S_ALU_MOD_X: |
172 | if (X == 0) | 171 | if (X == 0) |
@@ -553,11 +552,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) | |||
553 | /* Some instructions need special checks */ | 552 | /* Some instructions need special checks */ |
554 | switch (code) { | 553 | switch (code) { |
555 | case BPF_S_ALU_DIV_K: | 554 | case BPF_S_ALU_DIV_K: |
556 | /* check for division by zero */ | ||
557 | if (ftest->k == 0) | ||
558 | return -EINVAL; | ||
559 | ftest->k = reciprocal_value(ftest->k); | ||
560 | break; | ||
561 | case BPF_S_ALU_MOD_K: | 555 | case BPF_S_ALU_MOD_K: |
562 | /* check for division by zero */ | 556 | /* check for division by zero */ |
563 | if (ftest->k == 0) | 557 | if (ftest->k == 0) |
@@ -853,27 +847,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) | |||
853 | to->code = decodes[code]; | 847 | to->code = decodes[code]; |
854 | to->jt = filt->jt; | 848 | to->jt = filt->jt; |
855 | to->jf = filt->jf; | 849 | to->jf = filt->jf; |
856 | 850 | to->k = filt->k; | |
857 | if (code == BPF_S_ALU_DIV_K) { | ||
858 | /* | ||
859 | * When loaded this rule user gave us X, which was | ||
860 | * translated into R = r(X). Now we calculate the | ||
861 | * RR = r(R) and report it back. If next time this | ||
862 | * value is loaded and RRR = r(RR) is calculated | ||
863 | * then the R == RRR will be true. | ||
864 | * | ||
865 | * One exception. X == 1 translates into R == 0 and | ||
866 | * we can't calculate RR out of it with r(). | ||
867 | */ | ||
868 | |||
869 | if (filt->k == 0) | ||
870 | to->k = 1; | ||
871 | else | ||
872 | to->k = reciprocal_value(filt->k); | ||
873 | |||
874 | BUG_ON(reciprocal_value(to->k) != filt->k); | ||
875 | } else | ||
876 | to->k = filt->k; | ||
877 | } | 851 | } |
878 | 852 | ||
879 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) | 853 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) |
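Note on the filter.c hunks: BPF_S_ALU_DIV_K now uses a real division because the multiplicative reciprocal approximation is not exact for every divisor. The sketch below is my userspace reconstruction of the pre-change helpers (an assumption, not the kernel header) and shows the degenerate K == 1 case the removed comment alluded to.

#include <stdint.h>
#include <stdio.h>

/* Reconstruction of the old helpers, for illustration only. */
static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t a = 12345678, k = 1;
	uint32_t r = reciprocal_value(k);  /* truncates to 0 when k == 1 */

	printf("true   %u / %u = %u\n", a, k, a / k);
	printf("approx %u / %u = %u\n", a, k, reciprocal_divide(a, r));
	return 0;
}
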
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d6ef17322500..2fc5beaf5783 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) | |||
395 | EXPORT_SYMBOL(__netdev_pick_tx); | 395 | EXPORT_SYMBOL(__netdev_pick_tx); |
396 | 396 | ||
397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, | 397 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
398 | struct sk_buff *skb) | 398 | struct sk_buff *skb, |
399 | void *accel_priv) | ||
399 | { | 400 | { |
400 | int queue_index = 0; | 401 | int queue_index = 0; |
401 | 402 | ||
402 | if (dev->real_num_tx_queues != 1) { | 403 | if (dev->real_num_tx_queues != 1) { |
403 | const struct net_device_ops *ops = dev->netdev_ops; | 404 | const struct net_device_ops *ops = dev->netdev_ops; |
404 | if (ops->ndo_select_queue) | 405 | if (ops->ndo_select_queue) |
405 | queue_index = ops->ndo_select_queue(dev, skb); | 406 | queue_index = ops->ndo_select_queue(dev, skb, |
407 | accel_priv); | ||
406 | else | 408 | else |
407 | queue_index = __netdev_pick_tx(dev, skb); | 409 | queue_index = __netdev_pick_tx(dev, skb); |
408 | queue_index = dev_cap_txqueue(dev, queue_index); | 410 | |
411 | if (!accel_priv) | ||
412 | queue_index = dev_cap_txqueue(dev, queue_index); | ||
409 | } | 413 | } |
410 | 414 | ||
411 | skb_set_queue_mapping(skb, queue_index); | 415 | skb_set_queue_mapping(skb, queue_index); |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ca15f32821fb..932c6d7cf666 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, | |||
1161 | neigh->parms->reachable_time : | 1161 | neigh->parms->reachable_time : |
1162 | 0))); | 1162 | 0))); |
1163 | neigh->nud_state = new; | 1163 | neigh->nud_state = new; |
1164 | notify = 1; | ||
1164 | } | 1165 | } |
1165 | 1166 | ||
1166 | if (lladdr != neigh->ha) { | 1167 | if (lladdr != neigh->ha) { |
@@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb) | |||
1274 | 1275 | ||
1275 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, | 1276 | if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, |
1276 | skb->len) < 0 && | 1277 | skb->len) < 0 && |
1277 | dev->header_ops->rebuild(skb)) | 1278 | dev_rebuild_header(skb)) |
1278 | return 0; | 1279 | return 0; |
1279 | 1280 | ||
1280 | return dev_queue_xmit(skb); | 1281 | return dev_queue_xmit(skb); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8f971990677c..19fe9c717ced 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { | 375 | if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { |
376 | struct netdev_queue *txq; | 376 | struct netdev_queue *txq; |
377 | 377 | ||
378 | txq = netdev_pick_tx(dev, skb); | 378 | txq = netdev_pick_tx(dev, skb, NULL); |
379 | 379 | ||
380 | /* try until next clock tick */ | 380 | /* try until next clock tick */ |
381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; | 381 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; |
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
386 | !vlan_hw_offload_capable(netif_skb_features(skb), | 386 | !vlan_hw_offload_capable(netif_skb_features(skb), |
387 | skb->vlan_proto)) { | 387 | skb->vlan_proto)) { |
388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); | 388 | skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb)); |
389 | if (unlikely(!skb)) | 389 | if (unlikely(!skb)) { |
390 | break; | 390 | /* This is actually a packet drop, but we |
391 | * don't want the code at the end of this | ||
392 | * function to try and re-queue a NULL skb. | ||
393 | */ | ||
394 | status = NETDEV_TX_OK; | ||
395 | goto unlock_txq; | ||
396 | } | ||
391 | skb->vlan_tci = 0; | 397 | skb->vlan_tci = 0; |
392 | } | 398 | } |
393 | 399 | ||
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
395 | if (status == NETDEV_TX_OK) | 401 | if (status == NETDEV_TX_OK) |
396 | txq_trans_update(txq); | 402 | txq_trans_update(txq); |
397 | } | 403 | } |
404 | unlock_txq: | ||
398 | __netif_tx_unlock(txq); | 405 | __netif_tx_unlock(txq); |
399 | 406 | ||
400 | if (status == NETDEV_TX_OK) | 407 | if (status == NETDEV_TX_OK) |
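Note on the netpoll hunk: when __vlan_put_tag() fails it has already consumed the skb, so the rewritten path records NETDEV_TX_OK and jumps straight to the unlock label rather than letting the retry logic touch a freed buffer. A generic C sketch of the "helper may free its input on failure, bail out cleanly" pattern; the names are invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* On failure this helper frees its input and returns NULL,
 * much like __vlan_put_tag() does with the skb. */
static char *add_tag(char *buf, int fail)
{
	if (fail) {
		free(buf);
		return NULL;
	}
	return strcat(buf, " [tagged]");
}

static int send_once(int fail)
{
	int status = -1;
	char *buf = calloc(1, 64);

	if (!buf)
		return -1;
	strcpy(buf, "payload");

	buf = add_tag(buf, fail);
	if (!buf) {
		/* Treat it as a drop; never requeue or reuse the buffer. */
		status = 0;
		goto out;
	}

	printf("sent: %s\n", buf);
	status = 0;
	free(buf);
out:
	return status;
}

int main(void)
{
	send_once(0);
	send_once(1);
	return 0;
}
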
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 9b7cf6c85f82..56cbb69ba024 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c | |||
@@ -173,14 +173,14 @@ static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft) | |||
173 | return css->cgroup->id; | 173 | return css->cgroup->id; |
174 | } | 174 | } |
175 | 175 | ||
176 | static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft, | 176 | static int read_priomap(struct seq_file *sf, void *v) |
177 | struct cgroup_map_cb *cb) | ||
178 | { | 177 | { |
179 | struct net_device *dev; | 178 | struct net_device *dev; |
180 | 179 | ||
181 | rcu_read_lock(); | 180 | rcu_read_lock(); |
182 | for_each_netdev_rcu(&init_net, dev) | 181 | for_each_netdev_rcu(&init_net, dev) |
183 | cb->fill(cb, dev->name, netprio_prio(css, dev)); | 182 | seq_printf(sf, "%s %u\n", dev->name, |
183 | netprio_prio(seq_css(sf), dev)); | ||
184 | rcu_read_unlock(); | 184 | rcu_read_unlock(); |
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
@@ -238,7 +238,7 @@ static struct cftype ss_files[] = { | |||
238 | }, | 238 | }, |
239 | { | 239 | { |
240 | .name = "ifpriomap", | 240 | .name = "ifpriomap", |
241 | .read_map = read_priomap, | 241 | .seq_show = read_priomap, |
242 | .write_string = write_priomap, | 242 | .write_string = write_priomap, |
243 | }, | 243 | }, |
244 | { } /* terminate */ | 244 | { } /* terminate */ |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 4c6bdf97a657..595ddf0459db 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = { | |||
152 | .llseek = noop_llseek, | 152 | .llseek = noop_llseek, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static __init int setup_jprobe(void) | ||
156 | { | ||
157 | int ret = register_jprobe(&dccp_send_probe); | ||
158 | |||
159 | if (ret) { | ||
160 | request_module("dccp"); | ||
161 | ret = register_jprobe(&dccp_send_probe); | ||
162 | } | ||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | static __init int dccpprobe_init(void) | 155 | static __init int dccpprobe_init(void) |
167 | { | 156 | { |
168 | int ret = -ENOMEM; | 157 | int ret = -ENOMEM; |
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void) | |||
174 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) | 163 | if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) |
175 | goto err0; | 164 | goto err0; |
176 | 165 | ||
177 | ret = setup_jprobe(); | 166 | ret = register_jprobe(&dccp_send_probe); |
167 | if (ret) { | ||
168 | ret = request_module("dccp"); | ||
169 | if (!ret) | ||
170 | ret = register_jprobe(&dccp_send_probe); | ||
171 | } | ||
172 | |||
178 | if (ret) | 173 | if (ret) |
179 | goto err1; | 174 | goto err1; |
180 | 175 | ||
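Note on the dccp probe hunk: the init path now checks the return value of request_module() and only retries register_jprobe() when the module load actually succeeded, so a failure is reported instead of being silently swallowed. A small C sketch of the "try, load the dependency, retry, propagate errors" pattern; the function names are made up.

#include <stdio.h>

static int attempts;

static int register_probe(void)
{
	/* Fails until the "module" has been loaded. */
	return ++attempts < 2 ? -1 : 0;
}

static int load_module(const char *name)
{
	printf("loading %s\n", name);
	return 0;                        /* 0 on success, negative on failure */
}

int main(void)
{
	int ret = register_probe();

	if (ret) {
		ret = load_module("dccp");
		if (!ret)
			ret = register_probe();  /* retry only if the load worked */
	}

	printf("init %s\n", ret ? "failed" : "succeeded");
	return ret;
}
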
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 459e200c08a4..a2d2456a557a 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c | |||
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb, | |||
547 | hc06_ptr += 3; | 547 | hc06_ptr += 3; |
548 | } else { | 548 | } else { |
549 | /* compress nothing */ | 549 | /* compress nothing */ |
550 | memcpy(hc06_ptr, &hdr, 4); | 550 | memcpy(hc06_ptr, hdr, 4); |
551 | /* replace the top byte with new ECN | DSCP format */ | 551 | /* replace the top byte with new ECN | DSCP format */ |
552 | *hc06_ptr = tmp; | 552 | *hc06_ptr = tmp; |
553 | hc06_ptr += 4; | 553 | hc06_ptr += 4; |
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index d08c7a43dcd1..89b265aea151 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c | |||
@@ -221,8 +221,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) | |||
221 | 221 | ||
222 | if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) { | 222 | if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) { |
223 | type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]); | 223 | type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]); |
224 | if (type >= __IEEE802154_DEV_MAX) | 224 | if (type >= __IEEE802154_DEV_MAX) { |
225 | return -EINVAL; | 225 | rc = -EINVAL; |
226 | goto nla_put_failure; | ||
227 | } | ||
226 | } | 228 | } |
227 | 229 | ||
228 | dev = phy->add_iface(phy, devname, type); | 230 | dev = phy->add_iface(phy, devname, type); |
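Note on the nl-phy hunk: returning -EINVAL from the middle of ieee802154_add_iface() skipped the cleanup label and apparently leaked what had already been set up earlier in the function (the netlink reply being built); the fix stores the error and jumps to the existing failure label instead. A generic C sketch of the single-exit cleanup idiom.

#include <stdio.h>
#include <stdlib.h>

int add_iface(int type)
{
	int rc = 0;
	char *msg = malloc(128);     /* resource acquired before validation */

	if (!msg)
		return -1;

	if (type >= 4) {             /* invalid request */
		rc = -1;
		goto out;            /* returning directly here would leak msg */
	}

	snprintf(msg, 128, "created interface of type %d", type);
	puts(msg);
out:
	free(msg);
	return rc;
}

int main(void)
{
	add_iface(1);
	if (add_iface(9))
		puts("rejected invalid type without leaking");
	return 0;
}
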
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index e5d436188464..2cd02f32f99f 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
28 | netdev_features_t enc_features; | 28 | netdev_features_t enc_features; |
29 | int ghl = GRE_HEADER_SECTION; | 29 | int ghl = GRE_HEADER_SECTION; |
30 | struct gre_base_hdr *greh; | 30 | struct gre_base_hdr *greh; |
31 | u16 mac_offset = skb->mac_header; | ||
31 | int mac_len = skb->mac_len; | 32 | int mac_len = skb->mac_len; |
32 | __be16 protocol = skb->protocol; | 33 | __be16 protocol = skb->protocol; |
33 | int tnl_hlen; | 34 | int tnl_hlen; |
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
58 | } else | 59 | } else |
59 | csum = false; | 60 | csum = false; |
60 | 61 | ||
62 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
63 | goto out; | ||
64 | |||
61 | /* setup inner skb. */ | 65 | /* setup inner skb. */ |
62 | skb->protocol = greh->protocol; | 66 | skb->protocol = greh->protocol; |
63 | skb->encapsulation = 0; | 67 | skb->encapsulation = 0; |
64 | 68 | ||
65 | if (unlikely(!pskb_may_pull(skb, ghl))) | ||
66 | goto out; | ||
67 | |||
68 | __skb_pull(skb, ghl); | 69 | __skb_pull(skb, ghl); |
69 | skb_reset_mac_header(skb); | 70 | skb_reset_mac_header(skb); |
70 | skb_set_network_header(skb, skb_inner_network_offset(skb)); | 71 | skb_set_network_header(skb, skb_inner_network_offset(skb)); |
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
73 | /* segment inner packet. */ | 74 | /* segment inner packet. */ |
74 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 75 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
75 | segs = skb_mac_gso_segment(skb, enc_features); | 76 | segs = skb_mac_gso_segment(skb, enc_features); |
76 | if (!segs || IS_ERR(segs)) | 77 | if (!segs || IS_ERR(segs)) { |
78 | skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); | ||
77 | goto out; | 79 | goto out; |
80 | } | ||
78 | 81 | ||
79 | skb = segs; | 82 | skb = segs; |
80 | tnl_hlen = skb_tnl_header_len(skb); | 83 | tnl_hlen = skb_tnl_header_len(skb); |
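Note on the gre_offload hunk: the pskb_may_pull() check moves ahead of the writes to skb->protocol and skb->encapsulation, and a failed segmentation now unwinds the partial header changes. The general rule, validate that the bytes you are about to parse are present before mutating any state, in a tiny C sketch with an invented packet structure.

#include <stdio.h>
#include <string.h>

struct pkt {
	unsigned char data[64];
	size_t len;
	size_t hdr_off;   /* parsing state we must not half-update */
};

static int parse_header(struct pkt *p, size_t hdr_len)
{
	/* Check availability first... */
	if (p->len < hdr_len)
		return -1;    /* ...so the state below is never partially updated */

	/* ...only then commit changes to the packet state. */
	p->hdr_off += hdr_len;
	p->len -= hdr_len;
	return 0;
}

int main(void)
{
	struct pkt p = { .len = 10, .hdr_off = 0 };

	memset(p.data, 0, sizeof(p.data));
	printf("short packet: %s\n",
	       parse_header(&p, 16) ? "rejected, state untouched" : "parsed");
	printf("hdr_off still %zu, len still %zu\n", p.hdr_off, p.len);
	return 0;
}
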
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 56a964a553d2..e34dccbc4d70 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
106 | 106 | ||
107 | r->id.idiag_sport = inet->inet_sport; | 107 | r->id.idiag_sport = inet->inet_sport; |
108 | r->id.idiag_dport = inet->inet_dport; | 108 | r->id.idiag_dport = inet->inet_dport; |
109 | |||
110 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
111 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
112 | |||
109 | r->id.idiag_src[0] = inet->inet_rcv_saddr; | 113 | r->id.idiag_src[0] = inet->inet_rcv_saddr; |
110 | r->id.idiag_dst[0] = inet->inet_daddr; | 114 | r->id.idiag_dst[0] = inet->inet_daddr; |
111 | 115 | ||
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, | |||
240 | 244 | ||
241 | r->idiag_family = tw->tw_family; | 245 | r->idiag_family = tw->tw_family; |
242 | r->idiag_retrans = 0; | 246 | r->idiag_retrans = 0; |
247 | |||
243 | r->id.idiag_if = tw->tw_bound_dev_if; | 248 | r->id.idiag_if = tw->tw_bound_dev_if; |
244 | sock_diag_save_cookie(tw, r->id.idiag_cookie); | 249 | sock_diag_save_cookie(tw, r->id.idiag_cookie); |
250 | |||
245 | r->id.idiag_sport = tw->tw_sport; | 251 | r->id.idiag_sport = tw->tw_sport; |
246 | r->id.idiag_dport = tw->tw_dport; | 252 | r->id.idiag_dport = tw->tw_dport; |
253 | |||
254 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
255 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
256 | |||
247 | r->id.idiag_src[0] = tw->tw_rcv_saddr; | 257 | r->id.idiag_src[0] = tw->tw_rcv_saddr; |
248 | r->id.idiag_dst[0] = tw->tw_daddr; | 258 | r->id.idiag_dst[0] = tw->tw_daddr; |
259 | |||
249 | r->idiag_state = tw->tw_substate; | 260 | r->idiag_state = tw->tw_substate; |
250 | r->idiag_timer = 3; | 261 | r->idiag_timer = 3; |
251 | r->idiag_expires = jiffies_to_msecs(tmo); | 262 | r->idiag_expires = jiffies_to_msecs(tmo); |
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, | |||
726 | 737 | ||
727 | r->id.idiag_sport = inet->inet_sport; | 738 | r->id.idiag_sport = inet->inet_sport; |
728 | r->id.idiag_dport = ireq->ir_rmt_port; | 739 | r->id.idiag_dport = ireq->ir_rmt_port; |
740 | |||
741 | memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); | ||
742 | memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); | ||
743 | |||
729 | r->id.idiag_src[0] = ireq->ir_loc_addr; | 744 | r->id.idiag_src[0] = ireq->ir_loc_addr; |
730 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; | 745 | r->id.idiag_dst[0] = ireq->ir_rmt_addr; |
746 | |||
731 | r->idiag_expires = jiffies_to_msecs(tmo); | 747 | r->idiag_expires = jiffies_to_msecs(tmo); |
732 | r->idiag_rqueue = 0; | 748 | r->idiag_rqueue = 0; |
733 | r->idiag_wqueue = 0; | 749 | r->idiag_wqueue = 0; |
@@ -914,12 +930,15 @@ skip_listen_ht: | |||
914 | spin_lock_bh(lock); | 930 | spin_lock_bh(lock); |
915 | sk_nulls_for_each(sk, node, &head->chain) { | 931 | sk_nulls_for_each(sk, node, &head->chain) { |
916 | int res; | 932 | int res; |
933 | int state; | ||
917 | 934 | ||
918 | if (!net_eq(sock_net(sk), net)) | 935 | if (!net_eq(sock_net(sk), net)) |
919 | continue; | 936 | continue; |
920 | if (num < s_num) | 937 | if (num < s_num) |
921 | goto next_normal; | 938 | goto next_normal; |
922 | if (!(r->idiag_states & (1 << sk->sk_state))) | 939 | state = (sk->sk_state == TCP_TIME_WAIT) ? |
940 | inet_twsk(sk)->tw_substate : sk->sk_state; | ||
941 | if (!(r->idiag_states & (1 << state))) | ||
923 | goto next_normal; | 942 | goto next_normal; |
924 | if (r->sdiag_family != AF_UNSPEC && | 943 | if (r->sdiag_family != AF_UNSPEC && |
925 | sk->sk_family != r->sdiag_family) | 944 | sk->sk_family != r->sdiag_family) |
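Note on the inet_diag hunks: the address fields in the reply are sized for IPv6, but IPv4 sockets only wrote the first 4 bytes, so the remaining bytes of the netlink message could carry uninitialized kernel memory. Each fill path now clears the whole field before storing the IPv4 address. A userspace illustration of the pattern; the structure layout is a stand-in, not the real inet_diag_sockid.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct diag_id_example {
	uint32_t src[4];   /* sized for IPv6, IPv4 uses src[0] only */
	uint32_t dst[4];
};

static void fill_v4(struct diag_id_example *id, uint32_t saddr, uint32_t daddr)
{
	/* Zero everything that will be copied out... */
	memset(id->src, 0, sizeof(id->src));
	memset(id->dst, 0, sizeof(id->dst));
	/* ...then store the 4 bytes that are actually meaningful. */
	id->src[0] = saddr;
	id->dst[0] = daddr;
}

int main(void)
{
	struct diag_id_example id;

	memset(&id, 0x5A, sizeof(id));   /* pretend this is stale memory */
	fill_v4(&id, 0x0100007f, 0x0200007f);
	printf("src[1..3] = %08x %08x %08x (no stale bytes)\n",
	       id.src[1], id.src[2], id.src[3]);
	return 0;
}
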
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d7aea4c5b940..e560ef34cf4b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) | |||
217 | iph->saddr, iph->daddr, tpi->key); | 217 | iph->saddr, iph->daddr, tpi->key); |
218 | 218 | ||
219 | if (tunnel) { | 219 | if (tunnel) { |
220 | skb_pop_mac_header(skb); | ||
220 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); | 221 | ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); |
221 | return PACKET_RCVD; | 222 | return PACKET_RCVD; |
222 | } | 223 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 912402752f2f..df184616493f 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk, | |||
828 | 828 | ||
829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { | 829 | if (cork->length + length > maxnonfragsize - fragheaderlen) { |
830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, | 830 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
831 | mtu-exthdrlen); | 831 | mtu - (opt ? opt->optlen : 0)); |
832 | return -EMSGSIZE; | 832 | return -EMSGSIZE; |
833 | } | 833 | } |
834 | 834 | ||
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, | |||
1151 | mtu : 0xFFFF; | 1151 | mtu : 0xFFFF; |
1152 | 1152 | ||
1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { | 1153 | if (cork->length + size > maxnonfragsize - fragheaderlen) { |
1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu); | 1154 | ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, |
1155 | mtu - (opt ? opt->optlen : 0)); | ||
1155 | return -EMSGSIZE; | 1156 | return -EMSGSIZE; |
1156 | } | 1157 | } |
1157 | 1158 | ||
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 62212c772a4b..1672409f5ba5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) | |||
157 | static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, | 157 | static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, |
158 | struct mr_table **mrt) | 158 | struct mr_table **mrt) |
159 | { | 159 | { |
160 | struct ipmr_result res; | ||
161 | struct fib_lookup_arg arg = { .result = &res, }; | ||
162 | int err; | 160 | int err; |
161 | struct ipmr_result res; | ||
162 | struct fib_lookup_arg arg = { | ||
163 | .result = &res, | ||
164 | .flags = FIB_LOOKUP_NOREF, | ||
165 | }; | ||
163 | 166 | ||
164 | err = fib_rules_lookup(net->ipv4.mr_rules_ops, | 167 | err = fib_rules_lookup(net->ipv4.mr_rules_ops, |
165 | flowi4_to_flowi(flp4), 0, &arg); | 168 | flowi4_to_flowi(flp4), 0, &arg); |
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f13bd91d9a56..a313c3fbeb46 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par) | |||
423 | static struct xt_target synproxy_tg4_reg __read_mostly = { | 423 | static struct xt_target synproxy_tg4_reg __read_mostly = { |
424 | .name = "SYNPROXY", | 424 | .name = "SYNPROXY", |
425 | .family = NFPROTO_IPV4, | 425 | .family = NFPROTO_IPV4, |
426 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
426 | .target = synproxy_tg4, | 427 | .target = synproxy_tg4, |
427 | .targetsize = sizeof(struct xt_synproxy_info), | 428 | .targetsize = sizeof(struct xt_synproxy_info), |
428 | .checkentry = synproxy_tg4_check, | 429 | .checkentry = synproxy_tg4_check, |
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index fff5ba1a33b7..4a5e94ac314a 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c | |||
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
72 | { | 72 | { |
73 | const struct nft_reject *priv = nft_expr_priv(expr); | 73 | const struct nft_reject *priv = nft_expr_priv(expr); |
74 | 74 | ||
75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type)) | 75 | if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type))) |
76 | goto nla_put_failure; | 76 | goto nla_put_failure; |
77 | 77 | ||
78 | switch (priv->type) { | 78 | switch (priv->type) { |
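Note on the nft_reject hunk: NFTA_REJECT_TYPE is a big-endian 32-bit netlink attribute, so the host-order priv->type must go through htonl() before nla_put_be32(); without it the value appears byte-swapped on little-endian machines. A two-value userspace demonstration of the difference.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t type = 1;   /* e.g. an enum value kept in host byte order */

	printf("host order   : 0x%08x\n", type);
	printf("network order: 0x%08x\n", htonl(type));
	/* On little-endian hosts the two differ (0x00000001 vs 0x01000000),
	 * which is why the attribute must be converted before it is dumped. */
	return 0;
}
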
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c4638e6f0238..82de78603686 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1623,11 +1623,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1623 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | 1623 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && |
1624 | !sysctl_tcp_low_latency && | 1624 | !sysctl_tcp_low_latency && |
1625 | net_dma_find_channel()) { | 1625 | net_dma_find_channel()) { |
1626 | preempt_enable_no_resched(); | 1626 | preempt_enable(); |
1627 | tp->ucopy.pinned_list = | 1627 | tp->ucopy.pinned_list = |
1628 | dma_pin_iovec_pages(msg->msg_iov, len); | 1628 | dma_pin_iovec_pages(msg->msg_iov, len); |
1629 | } else { | 1629 | } else { |
1630 | preempt_enable_no_resched(); | 1630 | preempt_enable(); |
1631 | } | 1631 | } |
1632 | } | 1632 | } |
1633 | #endif | 1633 | #endif |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 06493736fbc8..098b3a29f6f3 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
@@ -22,6 +22,9 @@ | |||
22 | 22 | ||
23 | int sysctl_tcp_nometrics_save __read_mostly; | 23 | int sysctl_tcp_nometrics_save __read_mostly; |
24 | 24 | ||
25 | static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr, | ||
26 | struct net *net, unsigned int hash); | ||
27 | |||
25 | struct tcp_fastopen_metrics { | 28 | struct tcp_fastopen_metrics { |
26 | u16 mss; | 29 | u16 mss; |
27 | u16 syn_loss:10; /* Recurring Fast Open SYN losses */ | 30 | u16 syn_loss:10; /* Recurring Fast Open SYN losses */ |
@@ -130,16 +133,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst, | |||
130 | } | 133 | } |
131 | } | 134 | } |
132 | 135 | ||
136 | #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) | ||
137 | |||
138 | static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) | ||
139 | { | ||
140 | if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) | ||
141 | tcpm_suck_dst(tm, dst, false); | ||
142 | } | ||
143 | |||
144 | #define TCP_METRICS_RECLAIM_DEPTH 5 | ||
145 | #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL | ||
146 | |||
133 | static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, | 147 | static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, |
134 | struct inetpeer_addr *addr, | 148 | struct inetpeer_addr *addr, |
135 | unsigned int hash, | 149 | unsigned int hash) |
136 | bool reclaim) | ||
137 | { | 150 | { |
138 | struct tcp_metrics_block *tm; | 151 | struct tcp_metrics_block *tm; |
139 | struct net *net; | 152 | struct net *net; |
153 | bool reclaim = false; | ||
140 | 154 | ||
141 | spin_lock_bh(&tcp_metrics_lock); | 155 | spin_lock_bh(&tcp_metrics_lock); |
142 | net = dev_net(dst->dev); | 156 | net = dev_net(dst->dev); |
157 | |||
158 | /* While waiting for the spin-lock the cache might have been populated | ||
159 | * with this entry and so we have to check again. | ||
160 | */ | ||
161 | tm = __tcp_get_metrics(addr, net, hash); | ||
162 | if (tm == TCP_METRICS_RECLAIM_PTR) { | ||
163 | reclaim = true; | ||
164 | tm = NULL; | ||
165 | } | ||
166 | if (tm) { | ||
167 | tcpm_check_stamp(tm, dst); | ||
168 | goto out_unlock; | ||
169 | } | ||
170 | |||
143 | if (unlikely(reclaim)) { | 171 | if (unlikely(reclaim)) { |
144 | struct tcp_metrics_block *oldest; | 172 | struct tcp_metrics_block *oldest; |
145 | 173 | ||
@@ -169,17 +197,6 @@ out_unlock: | |||
169 | return tm; | 197 | return tm; |
170 | } | 198 | } |
171 | 199 | ||
172 | #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) | ||
173 | |||
174 | static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) | ||
175 | { | ||
176 | if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) | ||
177 | tcpm_suck_dst(tm, dst, false); | ||
178 | } | ||
179 | |||
180 | #define TCP_METRICS_RECLAIM_DEPTH 5 | ||
181 | #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL | ||
182 | |||
183 | static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth) | 200 | static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth) |
184 | { | 201 | { |
185 | if (tm) | 202 | if (tm) |
@@ -282,7 +299,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, | |||
282 | struct inetpeer_addr addr; | 299 | struct inetpeer_addr addr; |
283 | unsigned int hash; | 300 | unsigned int hash; |
284 | struct net *net; | 301 | struct net *net; |
285 | bool reclaim; | ||
286 | 302 | ||
287 | addr.family = sk->sk_family; | 303 | addr.family = sk->sk_family; |
288 | switch (addr.family) { | 304 | switch (addr.family) { |
@@ -304,13 +320,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, | |||
304 | hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); | 320 | hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); |
305 | 321 | ||
306 | tm = __tcp_get_metrics(&addr, net, hash); | 322 | tm = __tcp_get_metrics(&addr, net, hash); |
307 | reclaim = false; | 323 | if (tm == TCP_METRICS_RECLAIM_PTR) |
308 | if (tm == TCP_METRICS_RECLAIM_PTR) { | ||
309 | reclaim = true; | ||
310 | tm = NULL; | 324 | tm = NULL; |
311 | } | ||
312 | if (!tm && create) | 325 | if (!tm && create) |
313 | tm = tcpm_new(dst, &addr, hash, reclaim); | 326 | tm = tcpm_new(dst, &addr, hash); |
314 | else | 327 | else |
315 | tcpm_check_stamp(tm, dst); | 328 | tcpm_check_stamp(tm, dst); |
316 | 329 | ||
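Note on the tcp_metrics hunks: two CPUs could both miss the unlocked lookup and then race to create the same entry; the fix re-runs the lookup after tcp_metrics_lock is taken and only then decides whether to insert or reclaim. A compact pthread sketch of the same "optimistic lookup, lock, re-check, insert" pattern on a plain list; hashing and the reclaim path are omitted, and the code is a simplified analogy, not the kernel locking scheme.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int key; struct entry *next; };

static _Atomic(struct entry *) head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(int key)
{
	for (struct entry *e = atomic_load(&head); e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static struct entry *get_or_create(int key)
{
	struct entry *e = lookup(key);   /* optimistic, no lock held */

	if (e)
		return e;

	pthread_mutex_lock(&lock);
	/* The entry may have been created while we waited for the lock,
	 * so look again before inserting; this mirrors the tcpm_new() fix. */
	e = lookup(key);
	if (!e) {
		e = calloc(1, sizeof(*e));
		e->key = key;
		e->next = atomic_load(&head);
		atomic_store(&head, e);  /* publish only when fully set up */
	}
	pthread_mutex_unlock(&lock);
	return e;
}

static void *worker(void *arg)
{
	(void)arg;
	get_or_create(42);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int n = 0;

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	for (struct entry *e = atomic_load(&head); e; e = e->next)
		n++;
	printf("entries created for key 42: %d (expect 1)\n", n);
	return 0;
}
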
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 62c19fdd102d..a7e4729e974b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1600,20 +1600,15 @@ static void flush_stack(struct sock **stack, unsigned int count, | |||
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | /* For TCP sockets, sk_rx_dst is protected by socket lock | 1602 | /* For TCP sockets, sk_rx_dst is protected by socket lock |
1603 | * For UDP, we use sk_dst_lock to guard against concurrent changes. | 1603 | * For UDP, we use xchg() to guard against concurrent changes. |
1604 | */ | 1604 | */ |
1605 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) | 1605 | static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) |
1606 | { | 1606 | { |
1607 | struct dst_entry *old; | 1607 | struct dst_entry *old; |
1608 | 1608 | ||
1609 | spin_lock(&sk->sk_dst_lock); | 1609 | dst_hold(dst); |
1610 | old = sk->sk_rx_dst; | 1610 | old = xchg(&sk->sk_rx_dst, dst); |
1611 | if (likely(old != dst)) { | 1611 | dst_release(old); |
1612 | dst_hold(dst); | ||
1613 | sk->sk_rx_dst = dst; | ||
1614 | dst_release(old); | ||
1615 | } | ||
1616 | spin_unlock(&sk->sk_dst_lock); | ||
1617 | } | 1612 | } |
1618 | 1613 | ||
1619 | /* | 1614 | /* |
@@ -2483,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
2483 | netdev_features_t features) | 2478 | netdev_features_t features) |
2484 | { | 2479 | { |
2485 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 2480 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
2481 | u16 mac_offset = skb->mac_header; | ||
2486 | int mac_len = skb->mac_len; | 2482 | int mac_len = skb->mac_len; |
2487 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); | 2483 | int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); |
2488 | __be16 protocol = skb->protocol; | 2484 | __be16 protocol = skb->protocol; |
@@ -2502,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, | |||
2502 | /* segment inner packet. */ | 2498 | /* segment inner packet. */ |
2503 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 2499 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); |
2504 | segs = skb_mac_gso_segment(skb, enc_features); | 2500 | segs = skb_mac_gso_segment(skb, enc_features); |
2505 | if (!segs || IS_ERR(segs)) | 2501 | if (!segs || IS_ERR(segs)) { |
2502 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, | ||
2503 | mac_len); | ||
2506 | goto out; | 2504 | goto out; |
2505 | } | ||
2507 | 2506 | ||
2508 | outer_hlen = skb_tnl_header_len(skb); | 2507 | outer_hlen = skb_tnl_header_len(skb); |
2509 | skb = segs; | 2508 | skb = segs; |
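Note on the udp.c hunks: the early-demux cache drops sk_dst_lock and updates sk_rx_dst with xchg() instead: take a reference on the new dst, atomically swap the pointer, then release whatever was cached before. A C11 sketch of the same exchange-and-release pattern, with reference counting reduced to a plain counter; the types and names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct dst {
	atomic_int refcnt;
	const char *name;
};

static _Atomic(struct dst *) cached;

static void dst_hold(struct dst *d)
{
	atomic_fetch_add(&d->refcnt, 1);
}

static void dst_release(struct dst *d)
{
	if (d && atomic_fetch_sub(&d->refcnt, 1) == 1)
		printf("%s: last reference dropped\n", d->name);
}

/* Lockless cache update: publish the new pointer, then drop the old one. */
static void cache_set(struct dst *d)
{
	struct dst *old;

	dst_hold(d);                       /* reference now owned by the cache */
	old = atomic_exchange(&cached, d);
	dst_release(old);                  /* release whatever was cached before */
}

int main(void)
{
	struct dst a = { 1, "dst A" };     /* caller starts with one reference */
	struct dst b = { 1, "dst B" };

	cache_set(&a);
	cache_set(&b);                     /* cache lets go of A */
	dst_release(&a);                   /* caller drops its own references */
	dst_release(&b);
	dst_release(atomic_exchange(&cached, NULL));
	return 0;
}
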
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 83206de2bc76..79c62bdcd3c5 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
41 | { | 41 | { |
42 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 42 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
43 | unsigned int mss; | 43 | unsigned int mss; |
44 | int offset; | ||
45 | __wsum csum; | ||
46 | |||
47 | if (skb->encapsulation && | ||
48 | skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) { | ||
49 | segs = skb_udp_tunnel_segment(skb, features); | ||
50 | goto out; | ||
51 | } | ||
44 | 52 | ||
45 | mss = skb_shinfo(skb)->gso_size; | 53 | mss = skb_shinfo(skb)->gso_size; |
46 | if (unlikely(skb->len <= mss)) | 54 | if (unlikely(skb->len <= mss)) |
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, | |||
63 | goto out; | 71 | goto out; |
64 | } | 72 | } |
65 | 73 | ||
74 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
75 | * HW cannot do checksum of UDP packets sent as multiple | ||
76 | * IP fragments. | ||
77 | */ | ||
78 | offset = skb_checksum_start_offset(skb); | ||
79 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
80 | offset += skb->csum_offset; | ||
81 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
82 | skb->ip_summed = CHECKSUM_NONE; | ||
83 | |||
66 | /* Fragment the skb. IP headers of the fragments are updated in | 84 | /* Fragment the skb. IP headers of the fragments are updated in |
67 | * inet_gso_segment() | 85 | * inet_gso_segment() |
68 | */ | 86 | */ |
69 | if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) | 87 | segs = skb_segment(skb, features); |
70 | segs = skb_udp_tunnel_segment(skb, features); | ||
71 | else { | ||
72 | int offset; | ||
73 | __wsum csum; | ||
74 | |||
75 | /* Do software UFO. Complete and fill in the UDP checksum as | ||
76 | * HW cannot do checksum of UDP packets sent as multiple | ||
77 | * IP fragments. | ||
78 | */ | ||
79 | offset = skb_checksum_start_offset(skb); | ||
80 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
81 | offset += skb->csum_offset; | ||
82 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | ||
83 | skb->ip_summed = CHECKSUM_NONE; | ||
84 | |||
85 | segs = skb_segment(skb, features); | ||
86 | } | ||
87 | out: | 88 | out: |
88 | return segs; | 89 | return segs; |
89 | } | 90 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d5fa5b8c443e..4b6b720971b9 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1671 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1672 | { | 1672 | { |
1673 | struct in6_addr addr; | 1673 | struct in6_addr addr; |
1674 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1674 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1675 | return; | 1675 | return; |
1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1676 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1677 | if (ipv6_addr_any(&addr)) | 1677 | if (ipv6_addr_any(&addr)) |
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1682 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
1683 | { | 1683 | { |
1684 | struct in6_addr addr; | 1684 | struct in6_addr addr; |
1685 | if (ifp->prefix_len == 127) /* RFC 6164 */ | 1685 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1686 | return; | 1686 | return; |
1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1687 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1688 | if (ipv6_addr_any(&addr)) | 1688 | if (ipv6_addr_any(&addr)) |
@@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
2509 | struct inet6_ifaddr *ifp; | 2509 | struct inet6_ifaddr *ifp; |
2510 | 2510 | ||
2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, | 2511 | ifp = ipv6_add_addr(idev, addr, NULL, plen, |
2512 | scope, IFA_F_PERMANENT, 0, 0); | 2512 | scope, IFA_F_PERMANENT, |
2513 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
2513 | if (!IS_ERR(ifp)) { | 2514 | if (!IS_ERR(ifp)) { |
2514 | spin_lock_bh(&ifp->lock); | 2515 | spin_lock_bh(&ifp->lock); |
2515 | ifp->flags &= ~IFA_F_TENTATIVE; | 2516 | ifp->flags &= ~IFA_F_TENTATIVE; |
@@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr | |||
2637 | #endif | 2638 | #endif |
2638 | 2639 | ||
2639 | 2640 | ||
2640 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0); | 2641 | ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, |
2642 | INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); | ||
2641 | if (!IS_ERR(ifp)) { | 2643 | if (!IS_ERR(ifp)) { |
2642 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); | 2644 | addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); |
2643 | addrconf_dad_start(ifp); | 2645 | addrconf_dad_start(ifp); |
@@ -3187,6 +3189,22 @@ out: | |||
3187 | in6_ifa_put(ifp); | 3189 | in6_ifa_put(ifp); |
3188 | } | 3190 | } |
3189 | 3191 | ||
3192 | /* ifp->idev must be at least read locked */ | ||
3193 | static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) | ||
3194 | { | ||
3195 | struct inet6_ifaddr *ifpiter; | ||
3196 | struct inet6_dev *idev = ifp->idev; | ||
3197 | |||
3198 | list_for_each_entry(ifpiter, &idev->addr_list, if_list) { | ||
3199 | if (ifp != ifpiter && ifpiter->scope == IFA_LINK && | ||
3200 | (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE| | ||
3201 | IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) == | ||
3202 | IFA_F_PERMANENT) | ||
3203 | return false; | ||
3204 | } | ||
3205 | return true; | ||
3206 | } | ||
3207 | |||
3190 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | 3208 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
3191 | { | 3209 | { |
3192 | struct net_device *dev = ifp->idev->dev; | 3210 | struct net_device *dev = ifp->idev->dev; |
@@ -3206,14 +3224,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
3206 | */ | 3224 | */ |
3207 | 3225 | ||
3208 | read_lock_bh(&ifp->idev->lock); | 3226 | read_lock_bh(&ifp->idev->lock); |
3209 | spin_lock(&ifp->lock); | 3227 | send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp); |
3210 | send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL && | ||
3211 | ifp->idev->valid_ll_addr_cnt == 1; | ||
3212 | send_rs = send_mld && | 3228 | send_rs = send_mld && |
3213 | ipv6_accept_ra(ifp->idev) && | 3229 | ipv6_accept_ra(ifp->idev) && |
3214 | ifp->idev->cnf.rtr_solicits > 0 && | 3230 | ifp->idev->cnf.rtr_solicits > 0 && |
3215 | (dev->flags&IFF_LOOPBACK) == 0; | 3231 | (dev->flags&IFF_LOOPBACK) == 0; |
3216 | spin_unlock(&ifp->lock); | ||
3217 | read_unlock_bh(&ifp->idev->lock); | 3232 | read_unlock_bh(&ifp->idev->lock); |
3218 | 3233 | ||
3219 | /* While dad is in progress mld report's source address is in6_addrany. | 3234 | /* While dad is in progress mld report's source address is in6_addrany. |
@@ -3456,7 +3471,12 @@ restart: | |||
3456 | &inet6_addr_lst[i], addr_lst) { | 3471 | &inet6_addr_lst[i], addr_lst) { |
3457 | unsigned long age; | 3472 | unsigned long age; |
3458 | 3473 | ||
3459 | if (ifp->flags & IFA_F_PERMANENT) | 3474 | /* When setting preferred_lft to a value not zero or |
3475 | * infinity, while valid_lft is infinity | ||
3476 | * IFA_F_PERMANENT has a non-infinity life time. | ||
3477 | */ | ||
3478 | if ((ifp->flags & IFA_F_PERMANENT) && | ||
3479 | (ifp->prefered_lft == INFINITY_LIFE_TIME)) | ||
3460 | continue; | 3480 | continue; |
3461 | 3481 | ||
3462 | spin_lock(&ifp->lock); | 3482 | spin_lock(&ifp->lock); |
@@ -3481,7 +3501,8 @@ restart: | |||
3481 | ifp->flags |= IFA_F_DEPRECATED; | 3501 | ifp->flags |= IFA_F_DEPRECATED; |
3482 | } | 3502 | } |
3483 | 3503 | ||
3484 | if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)) | 3504 | if ((ifp->valid_lft != INFINITY_LIFE_TIME) && |
3505 | (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))) | ||
3485 | next = ifp->tstamp + ifp->valid_lft * HZ; | 3506 | next = ifp->tstamp + ifp->valid_lft * HZ; |
3486 | 3507 | ||
3487 | spin_unlock(&ifp->lock); | 3508 | spin_unlock(&ifp->lock); |
@@ -3761,7 +3782,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, | |||
3761 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), | 3782 | put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), |
3762 | ifa->idev->dev->ifindex); | 3783 | ifa->idev->dev->ifindex); |
3763 | 3784 | ||
3764 | if (!(ifa->flags&IFA_F_PERMANENT)) { | 3785 | if (!((ifa->flags&IFA_F_PERMANENT) && |
3786 | (ifa->prefered_lft == INFINITY_LIFE_TIME))) { | ||
3765 | preferred = ifa->prefered_lft; | 3787 | preferred = ifa->prefered_lft; |
3766 | valid = ifa->valid_lft; | 3788 | valid = ifa->valid_lft; |
3767 | if (preferred != INFINITY_LIFE_TIME) { | 3789 | if (preferred != INFINITY_LIFE_TIME) { |
@@ -4503,19 +4525,6 @@ errout: | |||
4503 | rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); | 4525 | rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err); |
4504 | } | 4526 | } |
4505 | 4527 | ||
4506 | static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count) | ||
4507 | { | ||
4508 | write_lock_bh(&ifp->idev->lock); | ||
4509 | spin_lock(&ifp->lock); | ||
4510 | if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC| | ||
4511 | IFA_F_DADFAILED)) == IFA_F_PERMANENT) && | ||
4512 | (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) | ||
4513 | ifp->idev->valid_ll_addr_cnt += count; | ||
4514 | WARN_ON(ifp->idev->valid_ll_addr_cnt < 0); | ||
4515 | spin_unlock(&ifp->lock); | ||
4516 | write_unlock_bh(&ifp->idev->lock); | ||
4517 | } | ||
4518 | |||
4519 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | 4528 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) |
4520 | { | 4529 | { |
4521 | struct net *net = dev_net(ifp->idev->dev); | 4530 | struct net *net = dev_net(ifp->idev->dev); |
@@ -4524,8 +4533,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4524 | 4533 | ||
4525 | switch (event) { | 4534 | switch (event) { |
4526 | case RTM_NEWADDR: | 4535 | case RTM_NEWADDR: |
4527 | update_valid_ll_addr_cnt(ifp, 1); | ||
4528 | |||
4529 | /* | 4536 | /* |
4530 | * If the address was optimistic | 4537 | * If the address was optimistic |
4531 | * we inserted the route at the start of | 4538 | * we inserted the route at the start of |
@@ -4541,8 +4548,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4541 | ifp->idev->dev, 0, 0); | 4548 | ifp->idev->dev, 0, 0); |
4542 | break; | 4549 | break; |
4543 | case RTM_DELADDR: | 4550 | case RTM_DELADDR: |
4544 | update_valid_ll_addr_cnt(ifp, -1); | ||
4545 | |||
4546 | if (ifp->idev->cnf.forwarding) | 4551 | if (ifp->idev->cnf.forwarding) |
4547 | addrconf_leave_anycast(ifp); | 4552 | addrconf_leave_anycast(ifp); |
4548 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4553 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4acdb63495db..e6f931997996 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1193 | 1193 | ||
1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + | 1194 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + |
1195 | (opt ? opt->opt_nflen : 0); | 1195 | (opt ? opt->opt_nflen : 0); |
1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); | 1196 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - |
1197 | sizeof(struct frag_hdr); | ||
1197 | 1198 | ||
1198 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { | 1199 | if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { |
1199 | if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { | 1200 | unsigned int maxnonfragsize, headersize; |
1200 | ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); | 1201 | |
1202 | headersize = sizeof(struct ipv6hdr) + | ||
1203 | (opt ? opt->tot_len : 0) + | ||
1204 | (dst_allfrag(&rt->dst) ? | ||
1205 | sizeof(struct frag_hdr) : 0) + | ||
1206 | rt->rt6i_nfheader_len; | ||
1207 | |||
1208 | maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ? | ||
1209 | mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN; | ||
1210 | |||
1211 | /* dontfrag active */ | ||
1212 | if ((cork->length + length > mtu - headersize) && dontfrag && | ||
1213 | (sk->sk_protocol == IPPROTO_UDP || | ||
1214 | sk->sk_protocol == IPPROTO_RAW)) { | ||
1215 | ipv6_local_rxpmtu(sk, fl6, mtu - headersize + | ||
1216 | sizeof(struct ipv6hdr)); | ||
1217 | goto emsgsize; | ||
1218 | } | ||
1219 | |||
1220 | if (cork->length + length > maxnonfragsize - headersize) { | ||
1221 | emsgsize: | ||
1222 | ipv6_local_error(sk, EMSGSIZE, fl6, | ||
1223 | mtu - headersize + | ||
1224 | sizeof(struct ipv6hdr)); | ||
1201 | return -EMSGSIZE; | 1225 | return -EMSGSIZE; |
1202 | } | 1226 | } |
1203 | } | 1227 | } |
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1222 | * --yoshfuji | 1246 | * --yoshfuji |
1223 | */ | 1247 | */ |
1224 | 1248 | ||
1225 | if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || | ||
1226 | sk->sk_protocol == IPPROTO_RAW)) { | ||
1227 | ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); | ||
1228 | return -EMSGSIZE; | ||
1229 | } | ||
1230 | |||
1231 | skb = skb_peek_tail(&sk->sk_write_queue); | 1249 | skb = skb_peek_tail(&sk->sk_write_queue); |
1232 | cork->length += length; | 1250 | cork->length += length; |
1233 | if (((length > mtu) || | 1251 | if (((length > mtu) || |
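Note on the ip6_output hunk: the rewritten check computes the actual per-packet overhead (the 40-byte IPv6 header, any extension headers, and an 8-byte fragment header when the destination requires fragmentation) and compares the accumulated payload against that, instead of approximating with exthdrlen. The arithmetic below is an illustration with sample numbers and no extension headers, not values taken from the commit.

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int ipv6_hdr = 40;   /* fixed IPv6 header size */
	unsigned int frag_hdr = 8;    /* IPv6 fragment header size */
	unsigned int opt_len = 0;     /* extension headers, none in this example */
	int allfrag = 0;              /* destination forces fragmentation? */

	unsigned int headersize = ipv6_hdr + opt_len +
				  (allfrag ? frag_hdr : 0);
	unsigned int fragheaderlen = ipv6_hdr + opt_len;
	unsigned int maxfraglen = ((mtu - fragheaderlen) & ~7U) +
				  fragheaderlen - frag_hdr;

	printf("headersize = %u bytes\n", headersize);
	printf("payload fits unfragmented up to %u bytes\n", mtu - headersize);
	printf("maxfraglen = %u\n", maxfraglen);
	return 0;
}
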
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d6062325db08..7881965a8248 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -103,16 +103,25 @@ struct ip6_tnl_net { | |||
103 | 103 | ||
104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) | 104 | static struct net_device_stats *ip6_get_stats(struct net_device *dev) |
105 | { | 105 | { |
106 | struct pcpu_tstats sum = { 0 }; | 106 | struct pcpu_tstats tmp, sum = { 0 }; |
107 | int i; | 107 | int i; |
108 | 108 | ||
109 | for_each_possible_cpu(i) { | 109 | for_each_possible_cpu(i) { |
110 | unsigned int start; | ||
110 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | 111 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); |
111 | 112 | ||
112 | sum.rx_packets += tstats->rx_packets; | 113 | do { |
113 | sum.rx_bytes += tstats->rx_bytes; | 114 | start = u64_stats_fetch_begin_bh(&tstats->syncp); |
114 | sum.tx_packets += tstats->tx_packets; | 115 | tmp.rx_packets = tstats->rx_packets; |
115 | sum.tx_bytes += tstats->tx_bytes; | 116 | tmp.rx_bytes = tstats->rx_bytes; |
117 | tmp.tx_packets = tstats->tx_packets; | ||
118 | tmp.tx_bytes = tstats->tx_bytes; | ||
119 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); | ||
120 | |||
121 | sum.rx_packets += tmp.rx_packets; | ||
122 | sum.rx_bytes += tmp.rx_bytes; | ||
123 | sum.tx_packets += tmp.tx_packets; | ||
124 | sum.tx_bytes += tmp.tx_bytes; | ||
116 | } | 125 | } |
117 | dev->stats.rx_packets = sum.rx_packets; | 126 | dev->stats.rx_packets = sum.rx_packets; |
118 | dev->stats.rx_bytes = sum.rx_bytes; | 127 | dev->stats.rx_bytes = sum.rx_bytes; |
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, | |||
824 | } | 833 | } |
825 | 834 | ||
826 | tstats = this_cpu_ptr(t->dev->tstats); | 835 | tstats = this_cpu_ptr(t->dev->tstats); |
836 | u64_stats_update_begin(&tstats->syncp); | ||
827 | tstats->rx_packets++; | 837 | tstats->rx_packets++; |
828 | tstats->rx_bytes += skb->len; | 838 | tstats->rx_bytes += skb->len; |
839 | u64_stats_update_end(&tstats->syncp); | ||
829 | 840 | ||
830 | netif_rx(skb); | 841 | netif_rx(skb); |
831 | 842 | ||
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed94ba61dda0..7b42d5ef868d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -75,26 +75,6 @@ struct vti6_net { | |||
75 | struct ip6_tnl __rcu **tnls[2]; | 75 | struct ip6_tnl __rcu **tnls[2]; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static struct net_device_stats *vti6_get_stats(struct net_device *dev) | ||
79 | { | ||
80 | struct pcpu_tstats sum = { 0 }; | ||
81 | int i; | ||
82 | |||
83 | for_each_possible_cpu(i) { | ||
84 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); | ||
85 | |||
86 | sum.rx_packets += tstats->rx_packets; | ||
87 | sum.rx_bytes += tstats->rx_bytes; | ||
88 | sum.tx_packets += tstats->tx_packets; | ||
89 | sum.tx_bytes += tstats->tx_bytes; | ||
90 | } | ||
91 | dev->stats.rx_packets = sum.rx_packets; | ||
92 | dev->stats.rx_bytes = sum.rx_bytes; | ||
93 | dev->stats.tx_packets = sum.tx_packets; | ||
94 | dev->stats.tx_bytes = sum.tx_bytes; | ||
95 | return &dev->stats; | ||
96 | } | ||
97 | |||
98 | #define for_each_vti6_tunnel_rcu(start) \ | 78 | #define for_each_vti6_tunnel_rcu(start) \ |
99 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) | 79 | for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) |
100 | 80 | ||
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb) | |||
331 | } | 311 | } |
332 | 312 | ||
333 | tstats = this_cpu_ptr(t->dev->tstats); | 313 | tstats = this_cpu_ptr(t->dev->tstats); |
314 | u64_stats_update_begin(&tstats->syncp); | ||
334 | tstats->rx_packets++; | 315 | tstats->rx_packets++; |
335 | tstats->rx_bytes += skb->len; | 316 | tstats->rx_bytes += skb->len; |
317 | u64_stats_update_end(&tstats->syncp); | ||
336 | 318 | ||
337 | skb->mark = 0; | 319 | skb->mark = 0; |
338 | secpath_reset(skb); | 320 | secpath_reset(skb); |
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = { | |||
716 | .ndo_start_xmit = vti6_tnl_xmit, | 698 | .ndo_start_xmit = vti6_tnl_xmit, |
717 | .ndo_do_ioctl = vti6_ioctl, | 699 | .ndo_do_ioctl = vti6_ioctl, |
718 | .ndo_change_mtu = vti6_change_mtu, | 700 | .ndo_change_mtu = vti6_change_mtu, |
719 | .ndo_get_stats = vti6_get_stats, | 701 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
720 | }; | 702 | }; |
721 | 703 | ||
722 | /** | 704 | /** |
@@ -750,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev) | |||
750 | static inline int vti6_dev_init_gen(struct net_device *dev) | 732 | static inline int vti6_dev_init_gen(struct net_device *dev) |
751 | { | 733 | { |
752 | struct ip6_tnl *t = netdev_priv(dev); | 734 | struct ip6_tnl *t = netdev_priv(dev); |
735 | int i; | ||
753 | 736 | ||
754 | t->dev = dev; | 737 | t->dev = dev; |
755 | t->net = dev_net(dev); | 738 | t->net = dev_net(dev); |
756 | dev->tstats = alloc_percpu(struct pcpu_tstats); | 739 | dev->tstats = alloc_percpu(struct pcpu_tstats); |
757 | if (!dev->tstats) | 740 | if (!dev->tstats) |
758 | return -ENOMEM; | 741 | return -ENOMEM; |
742 | for_each_possible_cpu(i) { | ||
743 | struct pcpu_tstats *stats; | ||
744 | stats = per_cpu_ptr(dev->tstats, i); | ||
745 | u64_stats_init(&stats->syncp); | ||
746 | } | ||
759 | return 0; | 747 | return 0; |
760 | } | 748 | } |
761 | 749 | ||
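
A minimal sketch of the per-CPU stats pattern the ip6_tnl/vti6 hunks above converge on: the syncp seqcount in each CPU's pcpu_tstats must be initialized once at device init, and every counter update is bracketed so readers (e.g. ip_tunnel_get_stats64()) see a consistent 64-bit snapshot on 32-bit hosts. Field and helper names are taken from the hunks; the function names and the assumption that pcpu_tstats carries rx/tx counters plus a struct u64_stats_sync syncp are illustrative only.

static int example_tnl_init_stats(struct net_device *dev)
{
	int i;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	/* each CPU's seqcount must be initialized before first use */
	for_each_possible_cpu(i)
		u64_stats_init(&per_cpu_ptr(dev->tstats, i)->syncp);
	return 0;
}

static void example_tnl_count_rx(struct net_device *dev, unsigned int len)
{
	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}
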
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index f365310bfcca..0eb4038a4d63 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | |||
141 | static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, | 141 | static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, |
142 | struct mr6_table **mrt) | 142 | struct mr6_table **mrt) |
143 | { | 143 | { |
144 | struct ip6mr_result res; | ||
145 | struct fib_lookup_arg arg = { .result = &res, }; | ||
146 | int err; | 144 | int err; |
145 | struct ip6mr_result res; | ||
146 | struct fib_lookup_arg arg = { | ||
147 | .result = &res, | ||
148 | .flags = FIB_LOOKUP_NOREF, | ||
149 | }; | ||
147 | 150 | ||
148 | err = fib_rules_lookup(net->ipv6.mr6_rules_ops, | 151 | err = fib_rules_lookup(net->ipv6.mr6_rules_ops, |
149 | flowi6_to_flowi(flp6), 0, &arg); | 152 | flowi6_to_flowi(flp6), 0, &arg); |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index f78f41aca8e9..a0d17270117c 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) | |||
446 | static struct xt_target synproxy_tg6_reg __read_mostly = { | 446 | static struct xt_target synproxy_tg6_reg __read_mostly = { |
447 | .name = "SYNPROXY", | 447 | .name = "SYNPROXY", |
448 | .family = NFPROTO_IPV6, | 448 | .family = NFPROTO_IPV6, |
449 | .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), | ||
449 | .target = synproxy_tg6, | 450 | .target = synproxy_tg6, |
450 | .targetsize = sizeof(struct xt_synproxy_info), | 451 | .targetsize = sizeof(struct xt_synproxy_info), |
451 | .checkentry = synproxy_tg6_check, | 452 | .checkentry = synproxy_tg6_check, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a0a48ac3403f..4b4944c3e4c4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1905,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, | |||
1905 | else | 1905 | else |
1906 | rt->rt6i_gateway = *dest; | 1906 | rt->rt6i_gateway = *dest; |
1907 | rt->rt6i_flags = ort->rt6i_flags; | 1907 | rt->rt6i_flags = ort->rt6i_flags; |
1908 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == | 1908 | rt6_set_from(rt, ort); |
1909 | (RTF_DEFAULT | RTF_ADDRCONF)) | ||
1910 | rt6_set_from(rt, ort); | ||
1911 | rt->rt6i_metric = 0; | 1909 | rt->rt6i_metric = 0; |
1912 | 1910 | ||
1913 | #ifdef CONFIG_IPV6_SUBTREES | 1911 | #ifdef CONFIG_IPV6_SUBTREES |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 366fbba3359a..d3005b34476a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb) | |||
702 | } | 702 | } |
703 | 703 | ||
704 | tstats = this_cpu_ptr(tunnel->dev->tstats); | 704 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
705 | u64_stats_update_begin(&tstats->syncp); | ||
705 | tstats->rx_packets++; | 706 | tstats->rx_packets++; |
706 | tstats->rx_bytes += skb->len; | 707 | tstats->rx_bytes += skb->len; |
708 | u64_stats_update_end(&tstats->syncp); | ||
707 | 709 | ||
708 | netif_rx(skb); | 710 | netif_rx(skb); |
709 | 711 | ||
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
924 | if (tunnel->parms.iph.daddr && skb_dst(skb)) | 926 | if (tunnel->parms.iph.daddr && skb_dst(skb)) |
925 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | 927 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
926 | 928 | ||
927 | if (skb->len > mtu) { | 929 | if (skb->len > mtu && !skb_is_gso(skb)) { |
928 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 930 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
929 | ip_rt_put(rt); | 931 | ip_rt_put(rt); |
930 | goto tx_error; | 932 | goto tx_error; |
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
966 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); | 968 | tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); |
967 | 969 | ||
968 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); | 970 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); |
969 | if (IS_ERR(skb)) | 971 | if (IS_ERR(skb)) { |
972 | ip_rt_put(rt); | ||
970 | goto out; | 973 | goto out; |
974 | } | ||
971 | 975 | ||
972 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, | 976 | err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos, |
973 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); | 977 | ttl, df, !net_eq(tunnel->net, dev_net(dev))); |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 7b01b9f5846c..c71b699eb555 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
715 | unsigned long cpu_flags; | 715 | unsigned long cpu_flags; |
716 | size_t copied = 0; | 716 | size_t copied = 0; |
717 | u32 peek_seq = 0; | 717 | u32 peek_seq = 0; |
718 | u32 *seq; | 718 | u32 *seq, skb_len; |
719 | unsigned long used; | 719 | unsigned long used; |
720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
721 | long timeo; | 721 | long timeo; |
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
812 | } | 812 | } |
813 | continue; | 813 | continue; |
814 | found_ok_skb: | 814 | found_ok_skb: |
815 | skb_len = skb->len; | ||
815 | /* Ok so how much can we use? */ | 816 | /* Ok so how much can we use? */ |
816 | used = skb->len - offset; | 817 | used = skb->len - offset; |
817 | if (len < used) | 818 | if (len < used) |
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
844 | } | 845 | } |
845 | 846 | ||
846 | /* Partial read */ | 847 | /* Partial read */ |
847 | if (used + offset < skb->len) | 848 | if (used + offset < skb_len) |
848 | continue; | 849 | continue; |
849 | } while (len > 0); | 850 | } while (len > 0); |
850 | 851 | ||
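
The af_llc hunk above snapshots skb->len while the skb is still owned, because on the !MSG_PEEK path the skb may already be unlinked and freed before the partial-read check runs. A condensed illustration of that pattern, with everything except the skb_len snapshot being hypothetical:

static bool example_partial_read(struct sock *sk, struct sk_buff *skb,
				 size_t offset, size_t used, int flags)
{
	u32 skb_len = skb->len;	/* snapshot before the skb can go away */

	if (!(flags & MSG_PEEK))
		skb_free_datagram(sk, skb);	/* skb invalid from here on */

	/* decide "partial read" from the snapshot, never from skb->len */
	return used + offset < skb_len;
}
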
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 36c3a4cbcabf..a0757913046e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev) | |||
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, | 1063 | static u16 ieee80211_netdev_select_queue(struct net_device *dev, |
1064 | struct sk_buff *skb) | 1064 | struct sk_buff *skb, |
1065 | void *accel_priv) | ||
1065 | { | 1066 | { |
1066 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); | 1067 | return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); |
1067 | } | 1068 | } |
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { | |||
1078 | }; | 1079 | }; |
1079 | 1080 | ||
1080 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, | 1081 | static u16 ieee80211_monitor_select_queue(struct net_device *dev, |
1081 | struct sk_buff *skb) | 1082 | struct sk_buff *skb, |
1083 | void *accel_priv) | ||
1082 | { | 1084 | { |
1083 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1085 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1084 | struct ieee80211_local *local = sdata->local; | 1086 | struct ieee80211_local *local = sdata->local; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c558b246ef00..ca7fa7f0613d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
463 | { | 463 | { |
464 | struct sta_info *sta = tx->sta; | 464 | struct sta_info *sta = tx->sta; |
465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | 465 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
466 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
467 | struct ieee80211_local *local = tx->local; | 466 | struct ieee80211_local *local = tx->local; |
468 | 467 | ||
469 | if (unlikely(!sta)) | 468 | if (unlikely(!sta)) |
@@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
474 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { | 473 | !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { |
475 | int ac = skb_get_queue_mapping(tx->skb); | 474 | int ac = skb_get_queue_mapping(tx->skb); |
476 | 475 | ||
477 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
478 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
479 | !ieee80211_is_deauth(hdr->frame_control) && | ||
480 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
481 | !ieee80211_is_action(hdr->frame_control)) { | ||
482 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
483 | return TX_CONTINUE; | ||
484 | } | ||
485 | |||
486 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", | 476 | ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", |
487 | sta->sta.addr, sta->sta.aid, ac); | 477 | sta->sta.addr, sta->sta.aid, ac); |
488 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) | 478 | if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) |
@@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) | |||
525 | static ieee80211_tx_result debug_noinline | 515 | static ieee80211_tx_result debug_noinline |
526 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) | 516 | ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) |
527 | { | 517 | { |
518 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); | ||
519 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; | ||
520 | |||
528 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) | 521 | if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) |
529 | return TX_CONTINUE; | 522 | return TX_CONTINUE; |
530 | 523 | ||
524 | /* only deauth, disassoc and action are bufferable MMPDUs */ | ||
525 | if (ieee80211_is_mgmt(hdr->frame_control) && | ||
526 | !ieee80211_is_deauth(hdr->frame_control) && | ||
527 | !ieee80211_is_disassoc(hdr->frame_control) && | ||
528 | !ieee80211_is_action(hdr->frame_control)) { | ||
529 | if (tx->flags & IEEE80211_TX_UNICAST) | ||
530 | info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; | ||
531 | return TX_CONTINUE; | ||
532 | } | ||
533 | |||
531 | if (tx->flags & IEEE80211_TX_UNICAST) | 534 | if (tx->flags & IEEE80211_TX_UNICAST) |
532 | return ieee80211_tx_h_unicast_ps_buf(tx); | 535 | return ieee80211_tx_h_unicast_ps_buf(tx); |
533 | else | 536 | else |
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c index c8beafd401aa..5a355a46d1dc 100644 --- a/net/netfilter/ipvs/ip_vs_nfct.c +++ b/net/netfilter/ipvs/ip_vs_nfct.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <net/ip_vs.h> | 63 | #include <net/ip_vs.h> |
64 | #include <net/netfilter/nf_conntrack_core.h> | 64 | #include <net/netfilter/nf_conntrack_core.h> |
65 | #include <net/netfilter/nf_conntrack_expect.h> | 65 | #include <net/netfilter/nf_conntrack_expect.h> |
66 | #include <net/netfilter/nf_conntrack_seqadj.h> | ||
66 | #include <net/netfilter/nf_conntrack_helper.h> | 67 | #include <net/netfilter/nf_conntrack_helper.h> |
67 | #include <net/netfilter/nf_conntrack_zones.h> | 68 | #include <net/netfilter/nf_conntrack_zones.h> |
68 | 69 | ||
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) | |||
97 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | 98 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) |
98 | return; | 99 | return; |
99 | 100 | ||
101 | /* Applications may adjust TCP seqs */ | ||
102 | if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && | ||
103 | !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct)) | ||
104 | return; | ||
105 | |||
100 | /* | 106 | /* |
101 | * The connection is not yet in the hashtable, so we update it. | 107 | * The connection is not yet in the hashtable, so we update it. |
102 | * CIP->VIP will remain the same, so leave the tuple in | 108 | * CIP->VIP will remain the same, so leave the tuple in |
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index 17c1bcb182c6..f6e2ae91a80b 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c | |||
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, | |||
36 | if (off == 0) | 36 | if (off == 0) |
37 | return 0; | 37 | return 0; |
38 | 38 | ||
39 | if (unlikely(!seqadj)) { | ||
40 | WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n"); | ||
41 | return 0; | ||
42 | } | ||
43 | |||
39 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); | 44 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); |
40 | 45 | ||
41 | spin_lock_bh(&ct->lock); | 46 | spin_lock_bh(&ct->lock); |
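
The ip_vs_nfct and nf_conntrack_seqadj hunks above pair up: a user that may adjust TCP sequence numbers has to attach the seqadj extension while the conntrack is still unconfirmed, and nf_ct_seqadj_set() now warns instead of touching a missing extension. A minimal sketch of the caller-side check (the wrapper name is illustrative):

static int example_ensure_seqadj(struct nf_conn *ct)
{
	/* already present, or successfully added to the unconfirmed ct */
	if (nfct_seqadj(ct) || nfct_seqadj_ext_add(ct))
		return 0;
	return -ENOMEM;		/* extension could not be attached */
}
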
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c index 902fb0a6b38a..7a394df0deb7 100644 --- a/net/netfilter/nf_conntrack_timestamp.c +++ b/net/netfilter/nf_conntrack_timestamp.c | |||
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net) | |||
97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) | 97 | void nf_conntrack_tstamp_pernet_fini(struct net *net) |
98 | { | 98 | { |
99 | nf_conntrack_tstamp_fini_sysctl(net); | 99 | nf_conntrack_tstamp_fini_sysctl(net); |
100 | nf_ct_extend_unregister(&tstamp_extend); | ||
101 | } | 100 | } |
102 | 101 | ||
103 | int nf_conntrack_tstamp_init(void) | 102 | int nf_conntrack_tstamp_init(void) |
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index f02b3605823e..1fb2258c3535 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c | |||
@@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb, | |||
34 | struct nf_conntrack_expect *exp) | 34 | struct nf_conntrack_expect *exp) |
35 | { | 35 | { |
36 | char buffer[sizeof("4294967296 65635")]; | 36 | char buffer[sizeof("4294967296 65635")]; |
37 | struct nf_conn *ct = exp->master; | ||
38 | union nf_inet_addr newaddr; | ||
37 | u_int16_t port; | 39 | u_int16_t port; |
38 | unsigned int ret; | 40 | unsigned int ret; |
39 | 41 | ||
40 | /* Reply comes from server. */ | 42 | /* Reply comes from server. */ |
43 | newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; | ||
44 | |||
41 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | 45 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; |
42 | exp->dir = IP_CT_DIR_REPLY; | 46 | exp->dir = IP_CT_DIR_REPLY; |
43 | exp->expectfn = nf_nat_follow_master; | 47 | exp->expectfn = nf_nat_follow_master; |
@@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb, | |||
57 | } | 61 | } |
58 | 62 | ||
59 | if (port == 0) { | 63 | if (port == 0) { |
60 | nf_ct_helper_log(skb, exp->master, "all ports in use"); | 64 | nf_ct_helper_log(skb, ct, "all ports in use"); |
61 | return NF_DROP; | 65 | return NF_DROP; |
62 | } | 66 | } |
63 | 67 | ||
64 | ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, | 68 | /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 |
65 | protoff, matchoff, matchlen, buffer, | 69 | * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 |
66 | strlen(buffer)); | 70 | * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 |
71 | * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 | ||
72 | * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 | ||
73 | * | ||
74 | * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, | ||
75 | * 255.255.255.255==4294967296, 10 digits) | ||
76 | * P: bound port (min 1 d, max 5d (65635)) | ||
77 | * F: filename (min 1 d ) | ||
78 | * S: size (min 1 d ) | ||
79 | * 0x01, \n: terminators | ||
80 | */ | ||
81 | /* AAA = "us", ie. where server normally talks to. */ | ||
82 | snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); | ||
83 | pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", | ||
84 | buffer, &newaddr.ip, port); | ||
85 | |||
86 | ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, | ||
87 | matchlen, buffer, strlen(buffer)); | ||
67 | if (ret != NF_ACCEPT) { | 88 | if (ret != NF_ACCEPT) { |
68 | nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); | 89 | nf_ct_helper_log(skb, ct, "cannot mangle packet"); |
69 | nf_ct_unexpect_related(exp); | 90 | nf_ct_unexpect_related(exp); |
70 | } | 91 | } |
92 | |||
71 | return ret; | 93 | return ret; |
72 | } | 94 | } |
73 | 95 | ||
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f93b7d06f4be..71a9f49a768b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
312 | int err, i = 0; | 312 | int err, i = 0; |
313 | 313 | ||
314 | list_for_each_entry(chain, &table->chains, list) { | 314 | list_for_each_entry(chain, &table->chains, list) { |
315 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
316 | continue; | ||
317 | |||
315 | err = nf_register_hook(&nft_base_chain(chain)->ops); | 318 | err = nf_register_hook(&nft_base_chain(chain)->ops); |
316 | if (err < 0) | 319 | if (err < 0) |
317 | goto err; | 320 | goto err; |
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table) | |||
321 | return 0; | 324 | return 0; |
322 | err: | 325 | err: |
323 | list_for_each_entry(chain, &table->chains, list) { | 326 | list_for_each_entry(chain, &table->chains, list) { |
327 | if (!(chain->flags & NFT_BASE_CHAIN)) | ||
328 | continue; | ||
329 | |||
324 | if (i-- <= 0) | 330 | if (i-- <= 0) |
325 | break; | 331 | break; |
326 | 332 | ||
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table) | |||
333 | { | 339 | { |
334 | struct nft_chain *chain; | 340 | struct nft_chain *chain; |
335 | 341 | ||
336 | list_for_each_entry(chain, &table->chains, list) | 342 | list_for_each_entry(chain, &table->chains, list) { |
337 | nf_unregister_hook(&nft_base_chain(chain)->ops); | 343 | if (chain->flags & NFT_BASE_CHAIN) |
344 | nf_unregister_hook(&nft_base_chain(chain)->ops); | ||
345 | } | ||
338 | 346 | ||
339 | return 0; | 347 | return 0; |
340 | } | 348 | } |
@@ -2098,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, | |||
2098 | struct netlink_callback *cb) | 2106 | struct netlink_callback *cb) |
2099 | { | 2107 | { |
2100 | const struct nft_set *set; | 2108 | const struct nft_set *set; |
2101 | unsigned int idx = 0, s_idx = cb->args[0]; | 2109 | unsigned int idx, s_idx = cb->args[0]; |
2102 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; | 2110 | struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; |
2103 | 2111 | ||
2104 | if (cb->args[1]) | 2112 | if (cb->args[1]) |
2105 | return skb->len; | 2113 | return skb->len; |
2106 | 2114 | ||
2107 | list_for_each_entry(table, &ctx->afi->tables, list) { | 2115 | list_for_each_entry(table, &ctx->afi->tables, list) { |
2108 | if (cur_table && cur_table != table) | 2116 | if (cur_table) { |
2109 | continue; | 2117 | if (cur_table != table) |
2118 | continue; | ||
2110 | 2119 | ||
2120 | cur_table = NULL; | ||
2121 | } | ||
2111 | ctx->table = table; | 2122 | ctx->table = table; |
2123 | idx = 0; | ||
2112 | list_for_each_entry(set, &ctx->table->sets, list) { | 2124 | list_for_each_entry(set, &ctx->table->sets, list) { |
2113 | if (idx < s_idx) | 2125 | if (idx < s_idx) |
2114 | goto cont; | 2126 | goto cont; |
@@ -2370,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, | |||
2370 | enum nft_registers dreg; | 2382 | enum nft_registers dreg; |
2371 | 2383 | ||
2372 | dreg = nft_type_to_reg(set->dtype); | 2384 | dreg = nft_type_to_reg(set->dtype); |
2373 | return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype); | 2385 | return nft_validate_data_load(ctx, dreg, &elem->data, |
2386 | set->dtype == NFT_DATA_VERDICT ? | ||
2387 | NFT_DATA_VERDICT : NFT_DATA_VALUE); | ||
2374 | } | 2388 | } |
2375 | 2389 | ||
2376 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 2390 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
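
Both nf_tables_table_enable() and nf_tables_table_disable() above now skip chains without NFT_BASE_CHAIN set, since only base chains own netfilter hook ops; a condensed sketch of the teardown side under that assumption (the wrapper itself is illustrative):

static void example_unregister_hooks(struct nft_table *table)
{
	struct nft_chain *chain;

	list_for_each_entry(chain, &table->chains, list) {
		if (!(chain->flags & NFT_BASE_CHAIN))
			continue;	/* non-base chains have no hook */
		nf_unregister_hook(&nft_base_chain(chain)->ops);
	}
}
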
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 3c4b69e5fe17..a155d19a225e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net) | |||
1053 | #ifdef CONFIG_PROC_FS | 1053 | #ifdef CONFIG_PROC_FS |
1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); | 1054 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); |
1055 | #endif | 1055 | #endif |
1056 | nf_log_unset(net, &nfulnl_logger); | ||
1056 | } | 1057 | } |
1057 | 1058 | ||
1058 | static struct pernet_operations nfnl_log_net_ops = { | 1059 | static struct pernet_operations nfnl_log_net_ops = { |
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 8e0bb75e7c51..55c939f5371f 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr, | |||
31 | { | 31 | { |
32 | struct nft_exthdr *priv = nft_expr_priv(expr); | 32 | struct nft_exthdr *priv = nft_expr_priv(expr); |
33 | struct nft_data *dest = &data[priv->dreg]; | 33 | struct nft_data *dest = &data[priv->dreg]; |
34 | unsigned int offset; | 34 | unsigned int offset = 0; |
35 | int err; | 35 | int err; |
36 | 36 | ||
37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); | 37 | err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); |
diff --git a/net/nfc/core.c b/net/nfc/core.c index 872529105abc..83b9927e7d19 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c | |||
@@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, | |||
384 | { | 384 | { |
385 | dev->dep_link_up = true; | 385 | dev->dep_link_up = true; |
386 | 386 | ||
387 | if (!dev->active_target) { | 387 | if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { |
388 | struct nfc_target *target; | 388 | struct nfc_target *target; |
389 | 389 | ||
390 | target = nfc_find_target(dev, target_idx); | 390 | target = nfc_find_target(dev, target_idx); |
diff --git a/net/rds/ib.c b/net/rds/ib.c index b4c8b0022fee..ba2dffeff608 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr) | |||
338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); | 338 | ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); |
339 | /* due to this, we will claim to support iWARP devices unless we | 339 | /* due to this, we will claim to support iWARP devices unless we |
340 | check node_type. */ | 340 | check node_type. */ |
341 | if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) | 341 | if (ret || !cm_id->device || |
342 | cm_id->device->node_type != RDMA_NODE_IB_CA) | ||
342 | ret = -EADDRNOTAVAIL; | 343 | ret = -EADDRNOTAVAIL; |
343 | 344 | ||
344 | rdsdebug("addr %pI4 ret %d node type %d\n", | 345 | rdsdebug("addr %pI4 ret %d node type %d\n", |
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 8eb9501e3d60..b7ebe23cdedf 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c | |||
@@ -421,8 +421,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item, | |||
421 | struct rds_ib_refill_cache *cache) | 421 | struct rds_ib_refill_cache *cache) |
422 | { | 422 | { |
423 | unsigned long flags; | 423 | unsigned long flags; |
424 | struct list_head *old; | 424 | struct list_head *old, *chpfirst; |
425 | struct list_head __percpu *chpfirst; | ||
426 | 425 | ||
427 | local_irq_save(flags); | 426 | local_irq_save(flags); |
428 | 427 | ||
@@ -432,7 +431,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item, | |||
432 | else /* put on front */ | 431 | else /* put on front */ |
433 | list_add_tail(new_item, chpfirst); | 432 | list_add_tail(new_item, chpfirst); |
434 | 433 | ||
435 | __this_cpu_write(chpfirst, new_item); | 434 | __this_cpu_write(cache->percpu->first, new_item); |
436 | __this_cpu_inc(cache->percpu->count); | 435 | __this_cpu_inc(cache->percpu->count); |
437 | 436 | ||
438 | if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) | 437 | if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) |
@@ -452,7 +451,7 @@ static void rds_ib_recv_cache_put(struct list_head *new_item, | |||
452 | } while (old); | 451 | } while (old); |
453 | 452 | ||
454 | 453 | ||
455 | __this_cpu_write(chpfirst, NULL); | 454 | __this_cpu_write(cache->percpu->first, NULL); |
456 | __this_cpu_write(cache->percpu->count, 0); | 455 | __this_cpu_write(cache->percpu->count, 0); |
457 | end: | 456 | end: |
458 | local_irq_restore(flags); | 457 | local_irq_restore(flags); |
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 5620d3c07479..bd2a5b90400c 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -25,15 +25,15 @@ | |||
25 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/acpi.h> | 27 | #include <linux/acpi.h> |
28 | #include <linux/acpi_gpio.h> | 28 | #include <linux/gpio/consumer.h> |
29 | 29 | ||
30 | #include <linux/rfkill-gpio.h> | 30 | #include <linux/rfkill-gpio.h> |
31 | 31 | ||
32 | struct rfkill_gpio_data { | 32 | struct rfkill_gpio_data { |
33 | const char *name; | 33 | const char *name; |
34 | enum rfkill_type type; | 34 | enum rfkill_type type; |
35 | int reset_gpio; | 35 | struct gpio_desc *reset_gpio; |
36 | int shutdown_gpio; | 36 | struct gpio_desc *shutdown_gpio; |
37 | 37 | ||
38 | struct rfkill *rfkill_dev; | 38 | struct rfkill *rfkill_dev; |
39 | char *reset_name; | 39 | char *reset_name; |
@@ -48,19 +48,15 @@ static int rfkill_gpio_set_power(void *data, bool blocked) | |||
48 | struct rfkill_gpio_data *rfkill = data; | 48 | struct rfkill_gpio_data *rfkill = data; |
49 | 49 | ||
50 | if (blocked) { | 50 | if (blocked) { |
51 | if (gpio_is_valid(rfkill->shutdown_gpio)) | 51 | gpiod_set_value(rfkill->shutdown_gpio, 0); |
52 | gpio_set_value(rfkill->shutdown_gpio, 0); | 52 | gpiod_set_value(rfkill->reset_gpio, 0); |
53 | if (gpio_is_valid(rfkill->reset_gpio)) | ||
54 | gpio_set_value(rfkill->reset_gpio, 0); | ||
55 | if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled) | 53 | if (!IS_ERR(rfkill->clk) && rfkill->clk_enabled) |
56 | clk_disable(rfkill->clk); | 54 | clk_disable(rfkill->clk); |
57 | } else { | 55 | } else { |
58 | if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled) | 56 | if (!IS_ERR(rfkill->clk) && !rfkill->clk_enabled) |
59 | clk_enable(rfkill->clk); | 57 | clk_enable(rfkill->clk); |
60 | if (gpio_is_valid(rfkill->reset_gpio)) | 58 | gpiod_set_value(rfkill->reset_gpio, 1); |
61 | gpio_set_value(rfkill->reset_gpio, 1); | 59 | gpiod_set_value(rfkill->shutdown_gpio, 1); |
62 | if (gpio_is_valid(rfkill->shutdown_gpio)) | ||
63 | gpio_set_value(rfkill->shutdown_gpio, 1); | ||
64 | } | 60 | } |
65 | 61 | ||
66 | rfkill->clk_enabled = blocked; | 62 | rfkill->clk_enabled = blocked; |
@@ -83,8 +79,6 @@ static int rfkill_gpio_acpi_probe(struct device *dev, | |||
83 | 79 | ||
84 | rfkill->name = dev_name(dev); | 80 | rfkill->name = dev_name(dev); |
85 | rfkill->type = (unsigned)id->driver_data; | 81 | rfkill->type = (unsigned)id->driver_data; |
86 | rfkill->reset_gpio = acpi_get_gpio_by_index(dev, 0, NULL); | ||
87 | rfkill->shutdown_gpio = acpi_get_gpio_by_index(dev, 1, NULL); | ||
88 | 82 | ||
89 | return 0; | 83 | return 0; |
90 | } | 84 | } |
@@ -94,8 +88,9 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
94 | struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; | 88 | struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; |
95 | struct rfkill_gpio_data *rfkill; | 89 | struct rfkill_gpio_data *rfkill; |
96 | const char *clk_name = NULL; | 90 | const char *clk_name = NULL; |
97 | int ret = 0; | 91 | struct gpio_desc *gpio; |
98 | int len = 0; | 92 | int ret; |
93 | int len; | ||
99 | 94 | ||
100 | rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL); | 95 | rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL); |
101 | if (!rfkill) | 96 | if (!rfkill) |
@@ -109,28 +104,10 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
109 | clk_name = pdata->power_clk_name; | 104 | clk_name = pdata->power_clk_name; |
110 | rfkill->name = pdata->name; | 105 | rfkill->name = pdata->name; |
111 | rfkill->type = pdata->type; | 106 | rfkill->type = pdata->type; |
112 | rfkill->reset_gpio = pdata->reset_gpio; | ||
113 | rfkill->shutdown_gpio = pdata->shutdown_gpio; | ||
114 | } else { | 107 | } else { |
115 | return -ENODEV; | 108 | return -ENODEV; |
116 | } | 109 | } |
117 | 110 | ||
118 | /* make sure at-least one of the GPIO is defined and that | ||
119 | * a name is specified for this instance */ | ||
120 | if ((!gpio_is_valid(rfkill->reset_gpio) && | ||
121 | !gpio_is_valid(rfkill->shutdown_gpio)) || !rfkill->name) { | ||
122 | pr_warn("%s: invalid platform data\n", __func__); | ||
123 | return -EINVAL; | ||
124 | } | ||
125 | |||
126 | if (pdata && pdata->gpio_runtime_setup) { | ||
127 | ret = pdata->gpio_runtime_setup(pdev); | ||
128 | if (ret) { | ||
129 | pr_warn("%s: can't set up gpio\n", __func__); | ||
130 | return ret; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | len = strlen(rfkill->name); | 111 | len = strlen(rfkill->name); |
135 | rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL); | 112 | rfkill->reset_name = devm_kzalloc(&pdev->dev, len + 7, GFP_KERNEL); |
136 | if (!rfkill->reset_name) | 113 | if (!rfkill->reset_name) |
@@ -145,20 +122,34 @@ static int rfkill_gpio_probe(struct platform_device *pdev) | |||
145 | 122 | ||
146 | rfkill->clk = devm_clk_get(&pdev->dev, clk_name); | 123 | rfkill->clk = devm_clk_get(&pdev->dev, clk_name); |
147 | 124 | ||
148 | if (gpio_is_valid(rfkill->reset_gpio)) { | 125 | gpio = devm_gpiod_get_index(&pdev->dev, rfkill->reset_name, 0); |
149 | ret = devm_gpio_request_one(&pdev->dev, rfkill->reset_gpio, | 126 | if (!IS_ERR(gpio)) { |
150 | 0, rfkill->reset_name); | 127 | ret = gpiod_direction_output(gpio, 0); |
151 | if (ret) { | 128 | if (ret) |
152 | pr_warn("%s: failed to get reset gpio.\n", __func__); | ||
153 | return ret; | 129 | return ret; |
154 | } | 130 | rfkill->reset_gpio = gpio; |
131 | } | ||
132 | |||
133 | gpio = devm_gpiod_get_index(&pdev->dev, rfkill->shutdown_name, 1); | ||
134 | if (!IS_ERR(gpio)) { | ||
135 | ret = gpiod_direction_output(gpio, 0); | ||
136 | if (ret) | ||
137 | return ret; | ||
138 | rfkill->shutdown_gpio = gpio; | ||
155 | } | 139 | } |
156 | 140 | ||
157 | if (gpio_is_valid(rfkill->shutdown_gpio)) { | 141 | /* Make sure at-least one of the GPIO is defined and that |
158 | ret = devm_gpio_request_one(&pdev->dev, rfkill->shutdown_gpio, | 142 | * a name is specified for this instance |
159 | 0, rfkill->shutdown_name); | 143 | */ |
144 | if ((!rfkill->reset_gpio && !rfkill->shutdown_gpio) || !rfkill->name) { | ||
145 | dev_err(&pdev->dev, "invalid platform data\n"); | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | |||
149 | if (pdata && pdata->gpio_runtime_setup) { | ||
150 | ret = pdata->gpio_runtime_setup(pdev); | ||
160 | if (ret) { | 151 | if (ret) { |
161 | pr_warn("%s: failed to get shutdown gpio.\n", __func__); | 152 | dev_err(&pdev->dev, "can't set up gpio\n"); |
162 | return ret; | 153 | return ret; |
163 | } | 154 | } |
164 | } | 155 | } |
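
The rfkill-gpio conversion above moves from integer GPIO numbers to gpio_desc handles, so validity becomes "descriptor is non-NULL" rather than gpio_is_valid(). A sketch of that consumer pattern, using the three-argument devm_gpiod_get_index() form seen in the hunk; the label handling and the decision to treat a missing line as optional are assumptions:

static struct gpio_desc *example_request_reset(struct device *dev,
					       const char *name)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_index(dev, name, 0);
	if (IS_ERR(gpio))
		return NULL;			/* line is optional */

	if (gpiod_direction_output(gpio, 0))	/* start deasserted */
		return NULL;

	return gpio;
}

static void example_assert_reset(struct gpio_desc *gpio, bool on)
{
	if (gpio)				/* skip an absent line */
		gpiod_set_value(gpio, on ? 1 : 0);
}
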
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 33af77246bfe..62ced6516c58 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1253 | 1253 | ||
1254 | if (msg->msg_name) { | 1254 | if (msg->msg_name) { |
1255 | struct sockaddr_rose *srose; | 1255 | struct sockaddr_rose *srose; |
1256 | struct full_sockaddr_rose *full_srose = msg->msg_name; | ||
1256 | 1257 | ||
1257 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); | 1258 | memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); |
1258 | srose = msg->msg_name; | 1259 | srose = msg->msg_name; |
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1260 | srose->srose_addr = rose->dest_addr; | 1261 | srose->srose_addr = rose->dest_addr; |
1261 | srose->srose_call = rose->dest_call; | 1262 | srose->srose_call = rose->dest_call; |
1262 | srose->srose_ndigis = rose->dest_ndigis; | 1263 | srose->srose_ndigis = rose->dest_ndigis; |
1263 | if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { | 1264 | for (n = 0 ; n < rose->dest_ndigis ; n++) |
1264 | struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; | 1265 | full_srose->srose_digis[n] = rose->dest_digis[n]; |
1265 | for (n = 0 ; n < rose->dest_ndigis ; n++) | 1266 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); |
1266 | full_srose->srose_digis[n] = rose->dest_digis[n]; | ||
1267 | msg->msg_namelen = sizeof(struct full_sockaddr_rose); | ||
1268 | } else { | ||
1269 | if (rose->dest_ndigis >= 1) { | ||
1270 | srose->srose_ndigis = 1; | ||
1271 | srose->srose_digi = rose->dest_digis[0]; | ||
1272 | } | ||
1273 | msg->msg_namelen = sizeof(struct sockaddr_rose); | ||
1274 | } | ||
1275 | } | 1267 | } |
1276 | 1268 | ||
1277 | skb_free_datagram(sk, skb); | 1269 | skb_free_datagram(sk, skb); |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 5c5edf56adbd..11fe1a416433 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est, | |||
77 | &csum_idx_gen, &csum_hash_info); | 77 | &csum_idx_gen, &csum_hash_info); |
78 | if (IS_ERR(pc)) | 78 | if (IS_ERR(pc)) |
79 | return PTR_ERR(pc); | 79 | return PTR_ERR(pc); |
80 | p = to_tcf_csum(pc); | ||
81 | ret = ACT_P_CREATED; | 80 | ret = ACT_P_CREATED; |
82 | } else { | 81 | } else { |
83 | p = to_tcf_csum(pc); | 82 | if (bind)/* dont override defaults */ |
84 | if (!ovr) { | 83 | return 0; |
85 | tcf_hash_release(pc, bind, &csum_hash_info); | 84 | tcf_hash_release(pc, bind, &csum_hash_info); |
85 | if (!ovr) | ||
86 | return -EEXIST; | 86 | return -EEXIST; |
87 | } | ||
88 | } | 87 | } |
89 | 88 | ||
89 | p = to_tcf_csum(pc); | ||
90 | spin_lock_bh(&p->tcf_lock); | 90 | spin_lock_bh(&p->tcf_lock); |
91 | p->tcf_action = parm->action; | 91 | p->tcf_action = parm->action; |
92 | p->update_flags = parm->update_flags; | 92 | p->update_flags = parm->update_flags; |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 5645a4d32abd..eb9ba60ebab4 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla, | |||
102 | return PTR_ERR(pc); | 102 | return PTR_ERR(pc); |
103 | ret = ACT_P_CREATED; | 103 | ret = ACT_P_CREATED; |
104 | } else { | 104 | } else { |
105 | if (!ovr) { | 105 | if (bind)/* dont override defaults */ |
106 | tcf_hash_release(pc, bind, &gact_hash_info); | 106 | return 0; |
107 | tcf_hash_release(pc, bind, &gact_hash_info); | ||
108 | if (!ovr) | ||
107 | return -EEXIST; | 109 | return -EEXIST; |
108 | } | ||
109 | } | 110 | } |
110 | 111 | ||
111 | gact = to_gact(pc); | 112 | gact = to_gact(pc); |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 882a89762f77..dcbfe8ce04a6 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
141 | return PTR_ERR(pc); | 141 | return PTR_ERR(pc); |
142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
143 | } else { | 143 | } else { |
144 | if (!ovr) { | 144 | if (bind)/* dont override defaults */ |
145 | tcf_ipt_release(to_ipt(pc), bind); | 145 | return 0; |
146 | tcf_ipt_release(to_ipt(pc), bind); | ||
147 | |||
148 | if (!ovr) | ||
146 | return -EEXIST; | 149 | return -EEXIST; |
147 | } | ||
148 | } | 150 | } |
149 | ipt = to_ipt(pc); | 151 | ipt = to_ipt(pc); |
150 | 152 | ||
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 6a15ace00241..76869538d028 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, | |||
70 | &nat_idx_gen, &nat_hash_info); | 70 | &nat_idx_gen, &nat_hash_info); |
71 | if (IS_ERR(pc)) | 71 | if (IS_ERR(pc)) |
72 | return PTR_ERR(pc); | 72 | return PTR_ERR(pc); |
73 | p = to_tcf_nat(pc); | ||
74 | ret = ACT_P_CREATED; | 73 | ret = ACT_P_CREATED; |
75 | } else { | 74 | } else { |
76 | p = to_tcf_nat(pc); | 75 | if (bind) |
77 | if (!ovr) { | 76 | return 0; |
78 | tcf_hash_release(pc, bind, &nat_hash_info); | 77 | tcf_hash_release(pc, bind, &nat_hash_info); |
78 | if (!ovr) | ||
79 | return -EEXIST; | 79 | return -EEXIST; |
80 | } | ||
81 | } | 80 | } |
81 | p = to_tcf_nat(pc); | ||
82 | 82 | ||
83 | spin_lock_bh(&p->tcf_lock); | 83 | spin_lock_bh(&p->tcf_lock); |
84 | p->old_addr = parm->old_addr; | 84 | p->old_addr = parm->old_addr; |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 03b67674169c..7aa2dcd989f8 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, | |||
84 | ret = ACT_P_CREATED; | 84 | ret = ACT_P_CREATED; |
85 | } else { | 85 | } else { |
86 | p = to_pedit(pc); | 86 | p = to_pedit(pc); |
87 | if (!ovr) { | 87 | tcf_hash_release(pc, bind, &pedit_hash_info); |
88 | tcf_hash_release(pc, bind, &pedit_hash_info); | 88 | if (bind) |
89 | return 0; | ||
90 | if (!ovr) | ||
89 | return -EEXIST; | 91 | return -EEXIST; |
90 | } | 92 | |
91 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { | 93 | if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { |
92 | keys = kmalloc(ksize, GFP_KERNEL); | 94 | keys = kmalloc(ksize, GFP_KERNEL); |
93 | if (keys == NULL) | 95 | if (keys == NULL) |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 16a62c36928a..ef246d87e68b 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
177 | if (bind) { | 177 | if (bind) { |
178 | police->tcf_bindcnt += 1; | 178 | police->tcf_bindcnt += 1; |
179 | police->tcf_refcnt += 1; | 179 | police->tcf_refcnt += 1; |
180 | return 0; | ||
180 | } | 181 | } |
181 | if (ovr) | 182 | if (ovr) |
182 | goto override; | 183 | goto override; |
183 | return ret; | 184 | /* not replacing */ |
185 | return -EEXIST; | ||
184 | } | 186 | } |
185 | } | 187 | } |
186 | 188 | ||
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 31157d3e729c..f7b45ab85388 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, | |||
142 | ret = ACT_P_CREATED; | 142 | ret = ACT_P_CREATED; |
143 | } else { | 143 | } else { |
144 | d = to_defact(pc); | 144 | d = to_defact(pc); |
145 | if (!ovr) { | 145 | |
146 | tcf_simp_release(d, bind); | 146 | if (bind) |
147 | return 0; | ||
148 | tcf_simp_release(d, bind); | ||
149 | if (!ovr) | ||
147 | return -EEXIST; | 150 | return -EEXIST; |
148 | } | 151 | |
149 | reset_policy(d, defdata, parm); | 152 | reset_policy(d, defdata, parm); |
150 | } | 153 | } |
151 | 154 | ||
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 35ea643b4325..8fe9d25c3008 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, | |||
120 | ret = ACT_P_CREATED; | 120 | ret = ACT_P_CREATED; |
121 | } else { | 121 | } else { |
122 | d = to_skbedit(pc); | 122 | d = to_skbedit(pc); |
123 | if (!ovr) { | 123 | if (bind) |
124 | tcf_hash_release(pc, bind, &skbedit_hash_info); | 124 | return 0; |
125 | tcf_hash_release(pc, bind, &skbedit_hash_info); | ||
126 | if (!ovr) | ||
125 | return -EEXIST; | 127 | return -EEXIST; |
126 | } | ||
127 | } | 128 | } |
128 | 129 | ||
129 | spin_lock_bh(&d->tcf_lock); | 130 | spin_lock_bh(&d->tcf_lock); |
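
The act_csum/gact/ipt/nat/pedit/simple/skbedit hunks above converge on one control flow when an action instance already exists: bind-only lookups return early with defaults intact, otherwise the reference is released and -EEXIST is returned unless an override was requested. A condensed sketch of that flow (the wrapper is illustrative; helper names follow the csum/gact variants):

static int example_existing_action(struct tcf_common *pc,
				   struct tcf_hashinfo *hinfo,
				   int bind, int ovr)
{
	if (bind)			/* binding only: keep defaults */
		return 0;

	tcf_hash_release(pc, bind, hinfo);

	if (!ovr)			/* exists and no replace requested */
		return -EEXIST;

	return 1;			/* caller proceeds to overwrite */
}
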
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 922a09406ba7..7fc899a943a8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, | |||
126 | 126 | ||
127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 127 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
128 | if (!netif_xmit_frozen_or_stopped(txq)) | 128 | if (!netif_xmit_frozen_or_stopped(txq)) |
129 | ret = dev_hard_start_xmit(skb, dev, txq, NULL); | 129 | ret = dev_hard_start_xmit(skb, dev, txq); |
130 | 130 | ||
131 | HARD_TX_UNLOCK(dev, txq); | 131 | HARD_TX_UNLOCK(dev, txq); |
132 | 132 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index f51ba985a36e..59268f6e2c36 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | |||
208 | INIT_LIST_HEAD(&q->retransmit); | 208 | INIT_LIST_HEAD(&q->retransmit); |
209 | INIT_LIST_HEAD(&q->sacked); | 209 | INIT_LIST_HEAD(&q->sacked); |
210 | INIT_LIST_HEAD(&q->abandoned); | 210 | INIT_LIST_HEAD(&q->abandoned); |
211 | |||
212 | q->empty = 1; | ||
213 | } | 211 | } |
214 | 212 | ||
215 | /* Free the outqueue structure and any related pending chunks. | 213 | /* Free the outqueue structure and any related pending chunks. |
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
332 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); | 330 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); |
333 | else | 331 | else |
334 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); | 332 | SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); |
335 | q->empty = 0; | ||
336 | break; | 333 | break; |
337 | } | 334 | } |
338 | } else { | 335 | } else { |
@@ -654,7 +651,6 @@ redo: | |||
654 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) | 651 | if (chunk->fast_retransmit == SCTP_NEED_FRTX) |
655 | chunk->fast_retransmit = SCTP_DONT_FRTX; | 652 | chunk->fast_retransmit = SCTP_DONT_FRTX; |
656 | 653 | ||
657 | q->empty = 0; | ||
658 | q->asoc->stats.rtxchunks++; | 654 | q->asoc->stats.rtxchunks++; |
659 | break; | 655 | break; |
660 | } | 656 | } |
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | |||
1065 | 1061 | ||
1066 | sctp_transport_reset_timers(transport); | 1062 | sctp_transport_reset_timers(transport); |
1067 | 1063 | ||
1068 | q->empty = 0; | ||
1069 | |||
1070 | /* Only let one DATA chunk get bundled with a | 1064 | /* Only let one DATA chunk get bundled with a |
1071 | * COOKIE-ECHO chunk. | 1065 | * COOKIE-ECHO chunk. |
1072 | */ | 1066 | */ |
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
1275 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, | 1269 | "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, |
1276 | asoc->adv_peer_ack_point); | 1270 | asoc->adv_peer_ack_point); |
1277 | 1271 | ||
1278 | /* See if all chunks are acked. | 1272 | return sctp_outq_is_empty(q); |
1279 | * Make sure the empty queue handler will get run later. | ||
1280 | */ | ||
1281 | q->empty = (list_empty(&q->out_chunk_list) && | ||
1282 | list_empty(&q->retransmit)); | ||
1283 | if (!q->empty) | ||
1284 | goto finish; | ||
1285 | |||
1286 | list_for_each_entry(transport, transport_list, transports) { | ||
1287 | q->empty = q->empty && list_empty(&transport->transmitted); | ||
1288 | if (!q->empty) | ||
1289 | goto finish; | ||
1290 | } | ||
1291 | |||
1292 | pr_debug("%s: sack queue is empty\n", __func__); | ||
1293 | finish: | ||
1294 | return q->empty; | ||
1295 | } | 1273 | } |
1296 | 1274 | ||
1297 | /* Is the outqueue empty? */ | 1275 | /* Is the outqueue empty? |
1276 | * The queue is empty when we have not pending data, no in-flight data | ||
1277 | * and nothing pending retransmissions. | ||
1278 | */ | ||
1298 | int sctp_outq_is_empty(const struct sctp_outq *q) | 1279 | int sctp_outq_is_empty(const struct sctp_outq *q) |
1299 | { | 1280 | { |
1300 | return q->empty; | 1281 | return q->out_qlen == 0 && q->outstanding_bytes == 0 && |
1282 | list_empty(&q->retransmit); | ||
1301 | } | 1283 | } |
1302 | 1284 | ||
1303 | /******************************************************************** | 1285 | /******************************************************************** |
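
The sctp_outq hunks above drop the cached q->empty flag, which could drift out of sync with the real queues, and derive emptiness from queue state on every call instead; a sketch of the check the code converges on (field names as in the hunk):

static inline bool example_outq_is_empty(const struct sctp_outq *q)
{
	/* nothing queued, nothing in flight, nothing awaiting rtx */
	return q->out_qlen == 0 &&
	       q->outstanding_bytes == 0 &&
	       list_empty(&q->retransmit);
}
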
diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 53c452efb40b..5e68b94ee640 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/sctp/sctp.h> | 38 | #include <net/sctp/sctp.h> |
39 | #include <net/sctp/sm.h> | 39 | #include <net/sctp/sm.h> |
40 | 40 | ||
41 | MODULE_SOFTDEP("pre: sctp"); | ||
41 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); | 42 | MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); |
42 | MODULE_DESCRIPTION("SCTP snooper"); | 43 | MODULE_DESCRIPTION("SCTP snooper"); |
43 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = { | |||
182 | .entry = jsctp_sf_eat_sack, | 183 | .entry = jsctp_sf_eat_sack, |
183 | }; | 184 | }; |
184 | 185 | ||
186 | static __init int sctp_setup_jprobe(void) | ||
187 | { | ||
188 | int ret = register_jprobe(&sctp_recv_probe); | ||
189 | |||
190 | if (ret) { | ||
191 | if (request_module("sctp")) | ||
192 | goto out; | ||
193 | ret = register_jprobe(&sctp_recv_probe); | ||
194 | } | ||
195 | |||
196 | out: | ||
197 | return ret; | ||
198 | } | ||
199 | |||
185 | static __init int sctpprobe_init(void) | 200 | static __init int sctpprobe_init(void) |
186 | { | 201 | { |
187 | int ret = -ENOMEM; | 202 | int ret = -ENOMEM; |
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void) | |||
202 | &sctpprobe_fops)) | 217 | &sctpprobe_fops)) |
203 | goto free_kfifo; | 218 | goto free_kfifo; |
204 | 219 | ||
205 | ret = register_jprobe(&sctp_recv_probe); | 220 | ret = sctp_setup_jprobe(); |
206 | if (ret) | 221 | if (ret) |
207 | goto remove_proc; | 222 | goto remove_proc; |
208 | 223 | ||
diff --git a/net/tipc/link.c b/net/tipc/link.c index 69cd9bf3f561..13b987745820 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) | |||
1498 | int type; | 1498 | int type; |
1499 | 1499 | ||
1500 | head = head->next; | 1500 | head = head->next; |
1501 | buf->next = NULL; | ||
1501 | 1502 | ||
1502 | /* Ensure bearer is still enabled */ | 1503 | /* Ensure bearer is still enabled */ |
1503 | if (unlikely(!b_ptr->active)) | 1504 | if (unlikely(!b_ptr->active)) |
diff --git a/net/tipc/port.c b/net/tipc/port.c index c081a7632302..d43f3182b1d4 100644 --- a/net/tipc/port.c +++ b/net/tipc/port.c | |||
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk, | |||
251 | return p_ptr; | 251 | return p_ptr; |
252 | } | 252 | } |
253 | 253 | ||
254 | int tipc_deleteport(u32 ref) | 254 | int tipc_deleteport(struct tipc_port *p_ptr) |
255 | { | 255 | { |
256 | struct tipc_port *p_ptr; | ||
257 | struct sk_buff *buf = NULL; | 256 | struct sk_buff *buf = NULL; |
258 | 257 | ||
259 | tipc_withdraw(ref, 0, NULL); | 258 | tipc_withdraw(p_ptr, 0, NULL); |
260 | p_ptr = tipc_port_lock(ref); | ||
261 | if (!p_ptr) | ||
262 | return -EINVAL; | ||
263 | 259 | ||
264 | tipc_ref_discard(ref); | 260 | spin_lock_bh(p_ptr->lock); |
265 | tipc_port_unlock(p_ptr); | 261 | tipc_ref_discard(p_ptr->ref); |
262 | spin_unlock_bh(p_ptr->lock); | ||
266 | 263 | ||
267 | k_cancel_timer(&p_ptr->timer); | 264 | k_cancel_timer(&p_ptr->timer); |
268 | if (p_ptr->connected) { | 265 | if (p_ptr->connected) { |
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp) | |||
704 | } | 701 | } |
705 | 702 | ||
706 | 703 | ||
707 | int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 704 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
705 | struct tipc_name_seq const *seq) | ||
708 | { | 706 | { |
709 | struct tipc_port *p_ptr; | ||
710 | struct publication *publ; | 707 | struct publication *publ; |
711 | u32 key; | 708 | u32 key; |
712 | int res = -EINVAL; | ||
713 | 709 | ||
714 | p_ptr = tipc_port_lock(ref); | 710 | if (p_ptr->connected) |
715 | if (!p_ptr) | ||
716 | return -EINVAL; | 711 | return -EINVAL; |
712 | key = p_ptr->ref + p_ptr->pub_count + 1; | ||
713 | if (key == p_ptr->ref) | ||
714 | return -EADDRINUSE; | ||
717 | 715 | ||
718 | if (p_ptr->connected) | ||
719 | goto exit; | ||
720 | key = ref + p_ptr->pub_count + 1; | ||
721 | if (key == ref) { | ||
722 | res = -EADDRINUSE; | ||
723 | goto exit; | ||
724 | } | ||
725 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, | 716 | publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, |
726 | scope, p_ptr->ref, key); | 717 | scope, p_ptr->ref, key); |
727 | if (publ) { | 718 | if (publ) { |
728 | list_add(&publ->pport_list, &p_ptr->publications); | 719 | list_add(&publ->pport_list, &p_ptr->publications); |
729 | p_ptr->pub_count++; | 720 | p_ptr->pub_count++; |
730 | p_ptr->published = 1; | 721 | p_ptr->published = 1; |
731 | res = 0; | 722 | return 0; |
732 | } | 723 | } |
733 | exit: | 724 | return -EINVAL; |
734 | tipc_port_unlock(p_ptr); | ||
735 | return res; | ||
736 | } | 725 | } |
737 | 726 | ||
738 | int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | 727 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
728 | struct tipc_name_seq const *seq) | ||
739 | { | 729 | { |
740 | struct tipc_port *p_ptr; | ||
741 | struct publication *publ; | 730 | struct publication *publ; |
742 | struct publication *tpubl; | 731 | struct publication *tpubl; |
743 | int res = -EINVAL; | 732 | int res = -EINVAL; |
744 | 733 | ||
745 | p_ptr = tipc_port_lock(ref); | ||
746 | if (!p_ptr) | ||
747 | return -EINVAL; | ||
748 | if (!seq) { | 734 | if (!seq) { |
749 | list_for_each_entry_safe(publ, tpubl, | 735 | list_for_each_entry_safe(publ, tpubl, |
750 | &p_ptr->publications, pport_list) { | 736 | &p_ptr->publications, pport_list) { |
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) | |||
771 | } | 757 | } |
772 | if (list_empty(&p_ptr->publications)) | 758 | if (list_empty(&p_ptr->publications)) |
773 | p_ptr->published = 0; | 759 | p_ptr->published = 0; |
774 | tipc_port_unlock(p_ptr); | ||
775 | return res; | 760 | return res; |
776 | } | 761 | } |
777 | 762 | ||
diff --git a/net/tipc/port.h b/net/tipc/port.h index 912253597343..34f12bd4074e 100644 --- a/net/tipc/port.h +++ b/net/tipc/port.h | |||
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err); | |||
116 | 116 | ||
117 | void tipc_acknowledge(u32 port_ref, u32 ack); | 117 | void tipc_acknowledge(u32 port_ref, u32 ack); |
118 | 118 | ||
119 | int tipc_deleteport(u32 portref); | 119 | int tipc_deleteport(struct tipc_port *p_ptr); |
120 | 120 | ||
121 | int tipc_portimportance(u32 portref, unsigned int *importance); | 121 | int tipc_portimportance(u32 portref, unsigned int *importance); |
122 | int tipc_set_portimportance(u32 portref, unsigned int importance); | 122 | int tipc_set_portimportance(u32 portref, unsigned int importance); |
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable); | |||
127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); | 127 | int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable); |
128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); | 128 | int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable); |
129 | 129 | ||
130 | int tipc_publish(u32 portref, unsigned int scope, | 130 | int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, |
131 | struct tipc_name_seq const *name_seq); | 131 | struct tipc_name_seq const *name_seq); |
132 | int tipc_withdraw(u32 portref, unsigned int scope, | 132 | int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, |
133 | struct tipc_name_seq const *name_seq); | 133 | struct tipc_name_seq const *name_seq); |
134 | 134 | ||
135 | int tipc_connect(u32 portref, struct tipc_portid const *port); | 135 | int tipc_connect(u32 portref, struct tipc_portid const *port); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3b61851bb927..e741416d1d24 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -354,7 +354,7 @@ static int release(struct socket *sock) | |||
354 | * Delete TIPC port; this ensures no more messages are queued | 354 | * Delete TIPC port; this ensures no more messages are queued |
355 | * (also disconnects an active connection & sends a 'FIN-' to peer) | 355 | * (also disconnects an active connection & sends a 'FIN-' to peer) |
356 | */ | 356 | */ |
357 | res = tipc_deleteport(tport->ref); | 357 | res = tipc_deleteport(tport); |
358 | 358 | ||
359 | /* Discard any remaining (connection-based) messages in receive queue */ | 359 | /* Discard any remaining (connection-based) messages in receive queue */ |
360 | __skb_queue_purge(&sk->sk_receive_queue); | 360 | __skb_queue_purge(&sk->sk_receive_queue); |
@@ -386,30 +386,46 @@ static int release(struct socket *sock) | |||
386 | */ | 386 | */ |
387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) | 387 | static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) |
388 | { | 388 | { |
389 | struct sock *sk = sock->sk; | ||
389 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; | 390 | struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; |
390 | u32 portref = tipc_sk_port(sock->sk)->ref; | 391 | struct tipc_port *tport = tipc_sk_port(sock->sk); |
392 | int res = -EINVAL; | ||
391 | 393 | ||
392 | if (unlikely(!uaddr_len)) | 394 | lock_sock(sk); |
393 | return tipc_withdraw(portref, 0, NULL); | 395 | if (unlikely(!uaddr_len)) { |
396 | res = tipc_withdraw(tport, 0, NULL); | ||
397 | goto exit; | ||
398 | } | ||
394 | 399 | ||
395 | if (uaddr_len < sizeof(struct sockaddr_tipc)) | 400 | if (uaddr_len < sizeof(struct sockaddr_tipc)) { |
396 | return -EINVAL; | 401 | res = -EINVAL; |
397 | if (addr->family != AF_TIPC) | 402 | goto exit; |
398 | return -EAFNOSUPPORT; | 403 | } |
404 | if (addr->family != AF_TIPC) { | ||
405 | res = -EAFNOSUPPORT; | ||
406 | goto exit; | ||
407 | } | ||
399 | 408 | ||
400 | if (addr->addrtype == TIPC_ADDR_NAME) | 409 | if (addr->addrtype == TIPC_ADDR_NAME) |
401 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; | 410 | addr->addr.nameseq.upper = addr->addr.nameseq.lower; |
402 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) | 411 | else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { |
403 | return -EAFNOSUPPORT; | 412 | res = -EAFNOSUPPORT; |
413 | goto exit; | ||
414 | } | ||
404 | 415 | ||
405 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && | 416 | if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) && |
406 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && | 417 | (addr->addr.nameseq.type != TIPC_TOP_SRV) && |
407 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) | 418 | (addr->addr.nameseq.type != TIPC_CFG_SRV)) { |
408 | return -EACCES; | 419 | res = -EACCES; |
420 | goto exit; | ||
421 | } | ||
409 | 422 | ||
410 | return (addr->scope > 0) ? | 423 | res = (addr->scope > 0) ? |
411 | tipc_publish(portref, addr->scope, &addr->addr.nameseq) : | 424 | tipc_publish(tport, addr->scope, &addr->addr.nameseq) : |
412 | tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq); | 425 | tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq); |
426 | exit: | ||
427 | release_sock(sk); | ||
428 | return res; | ||
413 | } | 429 | } |
414 | 430 | ||
415 | /** | 431 | /** |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a0ca162e5bd5..a427623ee574 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -718,7 +718,9 @@ static int unix_autobind(struct socket *sock) | |||
718 | int err; | 718 | int err; |
719 | unsigned int retries = 0; | 719 | unsigned int retries = 0; |
720 | 720 | ||
721 | mutex_lock(&u->readlock); | 721 | err = mutex_lock_interruptible(&u->readlock); |
722 | if (err) | ||
723 | return err; | ||
722 | 724 | ||
723 | err = 0; | 725 | err = 0; |
724 | if (u->addr) | 726 | if (u->addr) |
@@ -877,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
877 | goto out; | 879 | goto out; |
878 | addr_len = err; | 880 | addr_len = err; |
879 | 881 | ||
880 | mutex_lock(&u->readlock); | 882 | err = mutex_lock_interruptible(&u->readlock); |
883 | if (err) | ||
884 | goto out; | ||
881 | 885 | ||
882 | err = -EINVAL; | 886 | err = -EINVAL; |
883 | if (u->addr) | 887 | if (u->addr) |
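
The af_unix hunks above replace mutex_lock() on u->readlock with mutex_lock_interruptible(), so a pending signal aborts bind()/autobind instead of blocking uninterruptibly; the general shape of that pattern (the wrapper is illustrative):

static int example_locked_section(struct mutex *lock)
{
	int err;

	err = mutex_lock_interruptible(lock);
	if (err)		/* -EINTR while waiting for the lock */
		return err;

	/* ... critical section ... */

	mutex_unlock(lock);
	return 0;
}
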
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index a271c27fac77..722da616438c 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c | |||
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init( | |||
124 | /* find payload start allowing for extended bitmap(s) */ | 124 | /* find payload start allowing for extended bitmap(s) */ |
125 | 125 | ||
126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { | 126 | if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { |
127 | if ((unsigned long)iterator->_arg - | ||
128 | (unsigned long)iterator->_rtheader + sizeof(uint32_t) > | ||
129 | (unsigned long)iterator->_max_length) | ||
130 | return -EINVAL; | ||
127 | while (get_unaligned_le32(iterator->_arg) & | 131 | while (get_unaligned_le32(iterator->_arg) & |
128 | (1 << IEEE80211_RADIOTAP_EXT)) { | 132 | (1 << IEEE80211_RADIOTAP_EXT)) { |
129 | iterator->_arg += sizeof(uint32_t); | 133 | iterator->_arg += sizeof(uint32_t); |
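
The radiotap iterator fix above verifies, before each extra "extended bitmap" word is consumed, that the next 32-bit read still lies inside the captured header; expressed as a stand-alone helper (names are illustrative, arithmetic mirrors the hunk):

static int example_ext_word_in_bounds(const void *arg, const void *hdr,
				      int max_length)
{
	if ((unsigned long)arg - (unsigned long)hdr + sizeof(uint32_t) >
	    (unsigned long)max_length)
		return -EINVAL;	/* truncated header: stop parsing */
	return 0;
}
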
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 65f800890d70..d3c5bd7c6b51 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
632 | } | 632 | } |
633 | #endif | 633 | #endif |
634 | 634 | ||
635 | if (!bss && (status == WLAN_STATUS_SUCCESS)) { | ||
636 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | ||
637 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
638 | wdev->ssid, wdev->ssid_len, | ||
639 | WLAN_CAPABILITY_ESS, | ||
640 | WLAN_CAPABILITY_ESS); | ||
641 | if (bss) | ||
642 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
643 | } | ||
644 | |||
635 | if (wdev->current_bss) { | 645 | if (wdev->current_bss) { |
636 | cfg80211_unhold_bss(wdev->current_bss); | 646 | cfg80211_unhold_bss(wdev->current_bss); |
637 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 647 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, | |||
649 | return; | 659 | return; |
650 | } | 660 | } |
651 | 661 | ||
652 | if (!bss) { | 662 | if (WARN_ON(!bss)) |
653 | WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect); | 663 | return; |
654 | bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, | ||
655 | wdev->ssid, wdev->ssid_len, | ||
656 | WLAN_CAPABILITY_ESS, | ||
657 | WLAN_CAPABILITY_ESS); | ||
658 | if (WARN_ON(!bss)) | ||
659 | return; | ||
660 | cfg80211_hold_bss(bss_from_pub(bss)); | ||
661 | } | ||
662 | 664 | ||
663 | wdev->current_bss = bss_from_pub(bss); | 665 | wdev->current_bss = bss_from_pub(bss); |
664 | 666 | ||