diff options
Diffstat (limited to 'net')
154 files changed, 1608 insertions, 1018 deletions
diff --git a/net/802/mrp.c b/net/802/mrp.c index a4cc3229952a..e085bcc754f6 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c | |||
| @@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) | |||
| 870 | * all pending messages before the applicant is gone. | 870 | * all pending messages before the applicant is gone. |
| 871 | */ | 871 | */ |
| 872 | del_timer_sync(&app->join_timer); | 872 | del_timer_sync(&app->join_timer); |
| 873 | |||
| 874 | spin_lock(&app->lock); | ||
| 873 | mrp_mad_event(app, MRP_EVENT_TX); | 875 | mrp_mad_event(app, MRP_EVENT_TX); |
| 874 | mrp_pdu_queue(app); | 876 | mrp_pdu_queue(app); |
| 877 | spin_unlock(&app->lock); | ||
| 878 | |||
| 875 | mrp_queue_xmit(app); | 879 | mrp_queue_xmit(app); |
| 876 | 880 | ||
| 877 | dev_mc_del(dev, appl->group_address); | 881 | dev_mc_del(dev, appl->group_address); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a18714469bf7..85addcd9372b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
| 86 | 86 | ||
| 87 | grp = &vlan_info->grp; | 87 | grp = &vlan_info->grp; |
| 88 | 88 | ||
| 89 | /* Take it out of our own structures, but be sure to interlock with | ||
| 90 | * HW accelerating devices or SW vlan input packet processing if | ||
| 91 | * VLAN is not 0 (leave it there for 802.1p). | ||
| 92 | */ | ||
| 93 | if (vlan_id) | ||
| 94 | vlan_vid_del(real_dev, vlan_id); | ||
| 95 | |||
| 96 | grp->nr_vlan_devs--; | 89 | grp->nr_vlan_devs--; |
| 97 | 90 | ||
| 98 | if (vlan->flags & VLAN_FLAG_MVRP) | 91 | if (vlan->flags & VLAN_FLAG_MVRP) |
| @@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
| 114 | vlan_gvrp_uninit_applicant(real_dev); | 107 | vlan_gvrp_uninit_applicant(real_dev); |
| 115 | } | 108 | } |
| 116 | 109 | ||
| 110 | /* Take it out of our own structures, but be sure to interlock with | ||
| 111 | * HW accelerating devices or SW vlan input packet processing if | ||
| 112 | * VLAN is not 0 (leave it there for 802.1p). | ||
| 113 | */ | ||
| 114 | if (vlan_id) | ||
| 115 | vlan_vid_del(real_dev, vlan_id); | ||
| 116 | |||
| 117 | /* Get rid of the vlan's reference to real_dev */ | 117 | /* Get rid of the vlan's reference to real_dev */ |
| 118 | dev_put(real_dev); | 118 | dev_put(real_dev); |
| 119 | } | 119 | } |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 74dea377fe5b..de2e950a0a7a 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
| @@ -655,7 +655,7 @@ static struct p9_trans_module p9_virtio_trans = { | |||
| 655 | .create = p9_virtio_create, | 655 | .create = p9_virtio_create, |
| 656 | .close = p9_virtio_close, | 656 | .close = p9_virtio_close, |
| 657 | .request = p9_virtio_request, | 657 | .request = p9_virtio_request, |
| 658 | //.zc_request = p9_virtio_zc_request, | 658 | .zc_request = p9_virtio_zc_request, |
| 659 | .cancel = p9_virtio_cancel, | 659 | .cancel = p9_virtio_cancel, |
| 660 | /* | 660 | /* |
| 661 | * We leave one entry for input and one entry for response | 661 | * We leave one entry for input and one entry for response |
diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4..737bef59ce89 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
| @@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
| 531 | struct sk_buff *skb; | 531 | struct sk_buff *skb; |
| 532 | int copied, error = -EINVAL; | 532 | int copied, error = -EINVAL; |
| 533 | 533 | ||
| 534 | msg->msg_namelen = 0; | ||
| 535 | |||
| 534 | if (sock->state != SS_CONNECTED) | 536 | if (sock->state != SS_CONNECTED) |
| 535 | return -ENOTCONN; | 537 | return -ENOTCONN; |
| 536 | 538 | ||
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc5071..e277e38f736b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
| @@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1642 | ax25_address src; | 1642 | ax25_address src; |
| 1643 | const unsigned char *mac = skb_mac_header(skb); | 1643 | const unsigned char *mac = skb_mac_header(skb); |
| 1644 | 1644 | ||
| 1645 | memset(sax, 0, sizeof(struct full_sockaddr_ax25)); | ||
| 1645 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, | 1646 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, |
| 1646 | &digi, NULL, NULL); | 1647 | &digi, NULL, NULL); |
| 1647 | sax->sax25_family = AF_AX25; | 1648 | sax->sax25_family = AF_AX25; |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a0b253ecadaf..a5bb0a769eb9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
| @@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
| 1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; | 1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; |
| 1289 | 1289 | ||
| 1290 | /* unpack the aggregated packets and process them one by one */ | 1290 | /* unpack the aggregated packets and process them one by one */ |
| 1291 | do { | 1291 | while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, |
| 1292 | batadv_ogm_packet->tt_num_changes)) { | ||
| 1292 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; | 1293 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; |
| 1293 | 1294 | ||
| 1294 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, | 1295 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, |
| @@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
| 1299 | 1300 | ||
| 1300 | packet_pos = packet_buff + buff_pos; | 1301 | packet_pos = packet_buff + buff_pos; |
| 1301 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; | 1302 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; |
| 1302 | } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, | 1303 | } |
| 1303 | batadv_ogm_packet->tt_num_changes)); | ||
| 1304 | 1304 | ||
| 1305 | kfree_skb(skb); | 1305 | kfree_skb(skb); |
| 1306 | return NET_RX_SUCCESS; | 1306 | return NET_RX_SUCCESS; |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 0488d70c8c35..fa563e497c48 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
| @@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface) | |||
| 169 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); | 169 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | int batadv_is_my_mac(const uint8_t *addr) | 172 | int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) |
| 173 | { | 173 | { |
| 174 | const struct batadv_hard_iface *hard_iface; | 174 | const struct batadv_hard_iface *hard_iface; |
| 175 | 175 | ||
| @@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr) | |||
| 178 | if (hard_iface->if_status != BATADV_IF_ACTIVE) | 178 | if (hard_iface->if_status != BATADV_IF_ACTIVE) |
| 179 | continue; | 179 | continue; |
| 180 | 180 | ||
| 181 | if (hard_iface->soft_iface != bat_priv->soft_iface) | ||
| 182 | continue; | ||
| 183 | |||
| 181 | if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { | 184 | if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { |
| 182 | rcu_read_unlock(); | 185 | rcu_read_unlock(); |
| 183 | return 1; | 186 | return 1; |
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ced08b936a96..d40910dfc8ea 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
| @@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue; | |||
| 162 | 162 | ||
| 163 | int batadv_mesh_init(struct net_device *soft_iface); | 163 | int batadv_mesh_init(struct net_device *soft_iface); |
| 164 | void batadv_mesh_free(struct net_device *soft_iface); | 164 | void batadv_mesh_free(struct net_device *soft_iface); |
| 165 | int batadv_is_my_mac(const uint8_t *addr); | 165 | int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); |
| 166 | struct batadv_hard_iface * | 166 | struct batadv_hard_iface * |
| 167 | batadv_seq_print_text_primary_if_get(struct seq_file *seq); | 167 | batadv_seq_print_text_primary_if_get(struct seq_file *seq); |
| 168 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | 168 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5ee21cebbbb0..319f2906c71a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
| @@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
| 402 | goto out; | 402 | goto out; |
| 403 | 403 | ||
| 404 | /* not for me */ | 404 | /* not for me */ |
| 405 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 405 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
| 406 | goto out; | 406 | goto out; |
| 407 | 407 | ||
| 408 | icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; | 408 | icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; |
| @@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | /* packet for me */ | 418 | /* packet for me */ |
| 419 | if (batadv_is_my_mac(icmp_packet->dst)) | 419 | if (batadv_is_my_mac(bat_priv, icmp_packet->dst)) |
| 420 | return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); | 420 | return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); |
| 421 | 421 | ||
| 422 | /* TTL exceeded */ | 422 | /* TTL exceeded */ |
| @@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig, | |||
| 548 | return router; | 548 | return router; |
| 549 | } | 549 | } |
| 550 | 550 | ||
| 551 | static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) | 551 | static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, |
| 552 | struct sk_buff *skb, int hdr_size) | ||
| 552 | { | 553 | { |
| 553 | struct ethhdr *ethhdr; | 554 | struct ethhdr *ethhdr; |
| 554 | 555 | ||
| @@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) | |||
| 567 | return -1; | 568 | return -1; |
| 568 | 569 | ||
| 569 | /* not for me */ | 570 | /* not for me */ |
| 570 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 571 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
| 571 | return -1; | 572 | return -1; |
| 572 | 573 | ||
| 573 | return 0; | 574 | return 0; |
| @@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
| 582 | char tt_flag; | 583 | char tt_flag; |
| 583 | size_t packet_size; | 584 | size_t packet_size; |
| 584 | 585 | ||
| 585 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 586 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
| 586 | return NET_RX_DROP; | 587 | return NET_RX_DROP; |
| 587 | 588 | ||
| 588 | /* I could need to modify it */ | 589 | /* I could need to modify it */ |
| @@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
| 614 | case BATADV_TT_RESPONSE: | 615 | case BATADV_TT_RESPONSE: |
| 615 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); | 616 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); |
| 616 | 617 | ||
| 617 | if (batadv_is_my_mac(tt_query->dst)) { | 618 | if (batadv_is_my_mac(bat_priv, tt_query->dst)) { |
| 618 | /* packet needs to be linearized to access the TT | 619 | /* packet needs to be linearized to access the TT |
| 619 | * changes | 620 | * changes |
| 620 | */ | 621 | */ |
| @@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
| 657 | struct batadv_roam_adv_packet *roam_adv_packet; | 658 | struct batadv_roam_adv_packet *roam_adv_packet; |
| 658 | struct batadv_orig_node *orig_node; | 659 | struct batadv_orig_node *orig_node; |
| 659 | 660 | ||
| 660 | if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) | 661 | if (batadv_check_unicast_packet(bat_priv, skb, |
| 662 | sizeof(*roam_adv_packet)) < 0) | ||
| 661 | goto out; | 663 | goto out; |
| 662 | 664 | ||
| 663 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); | 665 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); |
| 664 | 666 | ||
| 665 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; | 667 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; |
| 666 | 668 | ||
| 667 | if (!batadv_is_my_mac(roam_adv_packet->dst)) | 669 | if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst)) |
| 668 | return batadv_route_unicast_packet(skb, recv_if); | 670 | return batadv_route_unicast_packet(skb, recv_if); |
| 669 | 671 | ||
| 670 | /* check if it is a backbone gateway. we don't accept | 672 | /* check if it is a backbone gateway. we don't accept |
| @@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
| 967 | * last time) the packet had an updated information or not | 969 | * last time) the packet had an updated information or not |
| 968 | */ | 970 | */ |
| 969 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); | 971 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); |
| 970 | if (!batadv_is_my_mac(unicast_packet->dest)) { | 972 | if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
| 971 | orig_node = batadv_orig_hash_find(bat_priv, | 973 | orig_node = batadv_orig_hash_find(bat_priv, |
| 972 | unicast_packet->dest); | 974 | unicast_packet->dest); |
| 973 | /* if it is not possible to find the orig_node representing the | 975 | /* if it is not possible to find the orig_node representing the |
| @@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
| 1044 | if (is4addr) | 1046 | if (is4addr) |
| 1045 | hdr_size = sizeof(*unicast_4addr_packet); | 1047 | hdr_size = sizeof(*unicast_4addr_packet); |
| 1046 | 1048 | ||
| 1047 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 1049 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
| 1048 | return NET_RX_DROP; | 1050 | return NET_RX_DROP; |
| 1049 | 1051 | ||
| 1050 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) | 1052 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) |
| 1051 | return NET_RX_DROP; | 1053 | return NET_RX_DROP; |
| 1052 | 1054 | ||
| 1053 | /* packet for me */ | 1055 | /* packet for me */ |
| 1054 | if (batadv_is_my_mac(unicast_packet->dest)) { | 1056 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
| 1055 | if (is4addr) { | 1057 | if (is4addr) { |
| 1056 | batadv_dat_inc_counter(bat_priv, | 1058 | batadv_dat_inc_counter(bat_priv, |
| 1057 | unicast_4addr_packet->subtype); | 1059 | unicast_4addr_packet->subtype); |
| @@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, | |||
| 1088 | struct sk_buff *new_skb = NULL; | 1090 | struct sk_buff *new_skb = NULL; |
| 1089 | int ret; | 1091 | int ret; |
| 1090 | 1092 | ||
| 1091 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 1093 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
| 1092 | return NET_RX_DROP; | 1094 | return NET_RX_DROP; |
| 1093 | 1095 | ||
| 1094 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) | 1096 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) |
| @@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, | |||
| 1097 | unicast_packet = (struct batadv_unicast_frag_packet *)skb->data; | 1099 | unicast_packet = (struct batadv_unicast_frag_packet *)skb->data; |
| 1098 | 1100 | ||
| 1099 | /* packet for me */ | 1101 | /* packet for me */ |
| 1100 | if (batadv_is_my_mac(unicast_packet->dest)) { | 1102 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
| 1101 | ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); | 1103 | ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); |
| 1102 | 1104 | ||
| 1103 | if (ret == NET_RX_DROP) | 1105 | if (ret == NET_RX_DROP) |
| @@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
| 1151 | goto out; | 1153 | goto out; |
| 1152 | 1154 | ||
| 1153 | /* ignore broadcasts sent by myself */ | 1155 | /* ignore broadcasts sent by myself */ |
| 1154 | if (batadv_is_my_mac(ethhdr->h_source)) | 1156 | if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) |
| 1155 | goto out; | 1157 | goto out; |
| 1156 | 1158 | ||
| 1157 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | 1159 | bcast_packet = (struct batadv_bcast_packet *)skb->data; |
| 1158 | 1160 | ||
| 1159 | /* ignore broadcasts originated by myself */ | 1161 | /* ignore broadcasts originated by myself */ |
| 1160 | if (batadv_is_my_mac(bcast_packet->orig)) | 1162 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) |
| 1161 | goto out; | 1163 | goto out; |
| 1162 | 1164 | ||
| 1163 | if (bcast_packet->header.ttl < 2) | 1165 | if (bcast_packet->header.ttl < 2) |
| @@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb, | |||
| 1243 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1245 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
| 1244 | 1246 | ||
| 1245 | /* not for me */ | 1247 | /* not for me */ |
| 1246 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 1248 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
| 1247 | return NET_RX_DROP; | 1249 | return NET_RX_DROP; |
| 1248 | 1250 | ||
| 1249 | /* ignore own packets */ | 1251 | /* ignore own packets */ |
| 1250 | if (batadv_is_my_mac(vis_packet->vis_orig)) | 1252 | if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig)) |
| 1251 | return NET_RX_DROP; | 1253 | return NET_RX_DROP; |
| 1252 | 1254 | ||
| 1253 | if (batadv_is_my_mac(vis_packet->sender_orig)) | 1255 | if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig)) |
| 1254 | return NET_RX_DROP; | 1256 | return NET_RX_DROP; |
| 1255 | 1257 | ||
| 1256 | switch (vis_packet->vis_type) { | 1258 | switch (vis_packet->vis_type) { |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 98a66a021a60..7abee19567e9 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -1953,7 +1953,7 @@ out: | |||
| 1953 | bool batadv_send_tt_response(struct batadv_priv *bat_priv, | 1953 | bool batadv_send_tt_response(struct batadv_priv *bat_priv, |
| 1954 | struct batadv_tt_query_packet *tt_request) | 1954 | struct batadv_tt_query_packet *tt_request) |
| 1955 | { | 1955 | { |
| 1956 | if (batadv_is_my_mac(tt_request->dst)) { | 1956 | if (batadv_is_my_mac(bat_priv, tt_request->dst)) { |
| 1957 | /* don't answer backbone gws! */ | 1957 | /* don't answer backbone gws! */ |
| 1958 | if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) | 1958 | if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) |
| 1959 | return true; | 1959 | return true; |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c053244b97bd..6a1e646be96d 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
| @@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
| 477 | 477 | ||
| 478 | /* Are we the target for this VIS packet? */ | 478 | /* Are we the target for this VIS packet? */ |
| 479 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && | 479 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && |
| 480 | batadv_is_my_mac(vis_packet->target_orig)) | 480 | batadv_is_my_mac(bat_priv, vis_packet->target_orig)) |
| 481 | are_target = 1; | 481 | are_target = 1; |
| 482 | 482 | ||
| 483 | spin_lock_bh(&bat_priv->vis.hash_lock); | 483 | spin_lock_bh(&bat_priv->vis.hash_lock); |
| @@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
| 496 | batadv_send_list_add(bat_priv, info); | 496 | batadv_send_list_add(bat_priv, info); |
| 497 | 497 | ||
| 498 | /* ... we're not the recipient (and thus need to forward). */ | 498 | /* ... we're not the recipient (and thus need to forward). */ |
| 499 | } else if (!batadv_is_my_mac(packet->target_orig)) { | 499 | } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) { |
| 500 | batadv_send_list_add(bat_priv, info); | 500 | batadv_send_list_add(bat_priv, info); |
| 501 | } | 501 | } |
| 502 | 502 | ||
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a78..0d1b08cc76e1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
| @@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 230 | if (flags & (MSG_OOB)) | 230 | if (flags & (MSG_OOB)) |
| 231 | return -EOPNOTSUPP; | 231 | return -EOPNOTSUPP; |
| 232 | 232 | ||
| 233 | msg->msg_namelen = 0; | ||
| 234 | |||
| 233 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 235 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
| 234 | if (!skb) { | 236 | if (!skb) { |
| 235 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 237 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
| @@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 237 | return err; | 239 | return err; |
| 238 | } | 240 | } |
| 239 | 241 | ||
| 240 | msg->msg_namelen = 0; | ||
| 241 | |||
| 242 | copied = skb->len; | 242 | copied = skb->len; |
| 243 | if (len < copied) { | 243 | if (len < copied) { |
| 244 | msg->msg_flags |= MSG_TRUNC; | 244 | msg->msg_flags |= MSG_TRUNC; |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263b..7c9224bcce17 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
| @@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 608 | 608 | ||
| 609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { | 609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { |
| 610 | rfcomm_dlc_accept(d); | 610 | rfcomm_dlc_accept(d); |
| 611 | msg->msg_namelen = 0; | ||
| 611 | return 0; | 612 | return 0; |
| 612 | } | 613 | } |
| 613 | 614 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 79d87d8d4f51..fb6192c9812e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk) | |||
| 359 | sco_chan_del(sk, ECONNRESET); | 359 | sco_chan_del(sk, ECONNRESET); |
| 360 | break; | 360 | break; |
| 361 | 361 | ||
| 362 | case BT_CONNECT2: | ||
| 362 | case BT_CONNECT: | 363 | case BT_CONNECT: |
| 363 | case BT_DISCONN: | 364 | case BT_DISCONN: |
| 364 | sco_chan_del(sk, ECONNRESET); | 365 | sco_chan_del(sk, ECONNRESET); |
| @@ -664,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 664 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { | 665 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { |
| 665 | hci_conn_accept(pi->conn->hcon, 0); | 666 | hci_conn_accept(pi->conn->hcon, 0); |
| 666 | sk->sk_state = BT_CONFIG; | 667 | sk->sk_state = BT_CONFIG; |
| 668 | msg->msg_namelen = 0; | ||
| 667 | 669 | ||
| 668 | release_sock(sk); | 670 | release_sock(sk); |
| 669 | return 0; | 671 | return 0; |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b28..314c73ed418f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
| @@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 66 | goto out; | 66 | goto out; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | mdst = br_mdb_get(br, skb); | 69 | mdst = br_mdb_get(br, skb, vid); |
| 70 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) | 70 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) |
| 71 | br_multicast_deliver(mdst, skb); | 71 | br_multicast_deliver(mdst, skb); |
| 72 | else | 72 | else |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b0812c91c0f0..bab338e6270d 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, | |||
| 423 | return 0; | 423 | return 0; |
| 424 | br_warn(br, "adding interface %s with same address " | 424 | br_warn(br, "adding interface %s with same address " |
| 425 | "as a received packet\n", | 425 | "as a received packet\n", |
| 426 | source->dev->name); | 426 | source ? source->dev->name : br->dev->name); |
| 427 | fdb_delete(br, fdb); | 427 | fdb_delete(br, fdb); |
| 428 | } | 428 | } |
| 429 | 429 | ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ef1b91431c6b..459dab22b3f6 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
| @@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p) | |||
| 67 | struct net_device *dev = p->dev; | 67 | struct net_device *dev = p->dev; |
| 68 | struct net_bridge *br = p->br; | 68 | struct net_bridge *br = p->br; |
| 69 | 69 | ||
| 70 | if (netif_running(dev) && netif_oper_up(dev)) | 70 | if (!(p->flags & BR_ADMIN_COST) && |
| 71 | netif_running(dev) && netif_oper_up(dev)) | ||
| 71 | p->path_cost = port_cost(dev); | 72 | p->path_cost = port_cost(dev); |
| 72 | 73 | ||
| 73 | if (!netif_running(br->dev)) | 74 | if (!netif_running(br->dev)) |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 480330151898..828e2bcc1f52 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
| @@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
| 97 | if (is_broadcast_ether_addr(dest)) | 97 | if (is_broadcast_ether_addr(dest)) |
| 98 | skb2 = skb; | 98 | skb2 = skb; |
| 99 | else if (is_multicast_ether_addr(dest)) { | 99 | else if (is_multicast_ether_addr(dest)) { |
| 100 | mdst = br_mdb_get(br, skb); | 100 | mdst = br_mdb_get(br, skb, vid); |
| 101 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { | 101 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
| 102 | if ((mdst && mdst->mglist) || | 102 | if ((mdst && mdst->mglist) || |
| 103 | br_multicast_is_router(br)) | 103 | br_multicast_is_router(br)) |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc65..ee79f3f20383 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
| @@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 80 | port = p->port; | 80 | port = p->port; |
| 81 | if (port) { | 81 | if (port) { |
| 82 | struct br_mdb_entry e; | 82 | struct br_mdb_entry e; |
| 83 | memset(&e, 0, sizeof(e)); | ||
| 83 | e.ifindex = port->dev->ifindex; | 84 | e.ifindex = port->dev->ifindex; |
| 84 | e.state = p->state; | 85 | e.state = p->state; |
| 85 | if (p->addr.proto == htons(ETH_P_IP)) | 86 | if (p->addr.proto == htons(ETH_P_IP)) |
| @@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 136 | break; | 137 | break; |
| 137 | 138 | ||
| 138 | bpm = nlmsg_data(nlh); | 139 | bpm = nlmsg_data(nlh); |
| 140 | memset(bpm, 0, sizeof(*bpm)); | ||
| 139 | bpm->ifindex = dev->ifindex; | 141 | bpm->ifindex = dev->ifindex; |
| 140 | if (br_mdb_fill_info(skb, cb, dev) < 0) | 142 | if (br_mdb_fill_info(skb, cb, dev) < 0) |
| 141 | goto out; | 143 | goto out; |
| @@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, | |||
| 171 | return -EMSGSIZE; | 173 | return -EMSGSIZE; |
| 172 | 174 | ||
| 173 | bpm = nlmsg_data(nlh); | 175 | bpm = nlmsg_data(nlh); |
| 176 | memset(bpm, 0, sizeof(*bpm)); | ||
| 174 | bpm->family = AF_BRIDGE; | 177 | bpm->family = AF_BRIDGE; |
| 175 | bpm->ifindex = dev->ifindex; | 178 | bpm->ifindex = dev->ifindex; |
| 176 | nest = nla_nest_start(skb, MDBA_MDB); | 179 | nest = nla_nest_start(skb, MDBA_MDB); |
| @@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, | |||
| 228 | { | 231 | { |
| 229 | struct br_mdb_entry entry; | 232 | struct br_mdb_entry entry; |
| 230 | 233 | ||
| 234 | memset(&entry, 0, sizeof(entry)); | ||
| 231 | entry.ifindex = port->dev->ifindex; | 235 | entry.ifindex = port->dev->ifindex; |
| 232 | entry.addr.proto = group->proto; | 236 | entry.addr.proto = group->proto; |
| 233 | entry.addr.u.ip4 = group->u.ip4; | 237 | entry.addr.u.ip4 = group->u.ip4; |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb62..923fbeaf7afd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( | |||
| 132 | #endif | 132 | #endif |
| 133 | 133 | ||
| 134 | struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 134 | struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
| 135 | struct sk_buff *skb) | 135 | struct sk_buff *skb, u16 vid) |
| 136 | { | 136 | { |
| 137 | struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); | 137 | struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb); |
| 138 | struct br_ip ip; | 138 | struct br_ip ip; |
| @@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | |||
| 144 | return NULL; | 144 | return NULL; |
| 145 | 145 | ||
| 146 | ip.proto = skb->protocol; | 146 | ip.proto = skb->protocol; |
| 147 | ip.vid = vid; | ||
| 147 | 148 | ||
| 148 | switch (skb->protocol) { | 149 | switch (skb->protocol) { |
| 149 | case htons(ETH_P_IP): | 150 | case htons(ETH_P_IP): |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 27aa3ee517ce..299fc5f40a26 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
| @@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void) | |||
| 29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ | 29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ |
| 30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ | 30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ |
| 31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ | 31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ |
| 32 | + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ | ||
| 32 | + 0; | 33 | + 0; |
| 33 | } | 34 | } |
| 34 | 35 | ||
| @@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) | |||
| 329 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); | 330 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); |
| 330 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); | 331 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); |
| 331 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); | 332 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); |
| 333 | br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); | ||
| 332 | 334 | ||
| 333 | if (tb[IFLA_BRPORT_COST]) { | 335 | if (tb[IFLA_BRPORT_COST]) { |
| 334 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); | 336 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bcb..d2c043a857b6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
| @@ -156,6 +156,7 @@ struct net_bridge_port | |||
| 156 | #define BR_BPDU_GUARD 0x00000002 | 156 | #define BR_BPDU_GUARD 0x00000002 |
| 157 | #define BR_ROOT_BLOCK 0x00000004 | 157 | #define BR_ROOT_BLOCK 0x00000004 |
| 158 | #define BR_MULTICAST_FAST_LEAVE 0x00000008 | 158 | #define BR_MULTICAST_FAST_LEAVE 0x00000008 |
| 159 | #define BR_ADMIN_COST 0x00000010 | ||
| 159 | 160 | ||
| 160 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 161 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
| 161 | u32 multicast_startup_queries_sent; | 162 | u32 multicast_startup_queries_sent; |
| @@ -442,7 +443,7 @@ extern int br_multicast_rcv(struct net_bridge *br, | |||
| 442 | struct net_bridge_port *port, | 443 | struct net_bridge_port *port, |
| 443 | struct sk_buff *skb); | 444 | struct sk_buff *skb); |
| 444 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 445 | extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
| 445 | struct sk_buff *skb); | 446 | struct sk_buff *skb, u16 vid); |
| 446 | extern void br_multicast_add_port(struct net_bridge_port *port); | 447 | extern void br_multicast_add_port(struct net_bridge_port *port); |
| 447 | extern void br_multicast_del_port(struct net_bridge_port *port); | 448 | extern void br_multicast_del_port(struct net_bridge_port *port); |
| 448 | extern void br_multicast_enable_port(struct net_bridge_port *port); | 449 | extern void br_multicast_enable_port(struct net_bridge_port *port); |
| @@ -504,7 +505,7 @@ static inline int br_multicast_rcv(struct net_bridge *br, | |||
| 504 | } | 505 | } |
| 505 | 506 | ||
| 506 | static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, | 507 | static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, |
| 507 | struct sk_buff *skb) | 508 | struct sk_buff *skb, u16 vid) |
| 508 | { | 509 | { |
| 509 | return NULL; | 510 | return NULL; |
| 510 | } | 511 | } |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 0bdb4ebd362b..d45e760141bb 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
| @@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost) | |||
| 288 | path_cost > BR_MAX_PATH_COST) | 288 | path_cost > BR_MAX_PATH_COST) |
| 289 | return -ERANGE; | 289 | return -ERANGE; |
| 290 | 290 | ||
| 291 | p->flags |= BR_ADMIN_COST; | ||
| 291 | p->path_cost = path_cost; | 292 | p->path_cost = path_cost; |
| 292 | br_configuration_update(p->br); | 293 | br_configuration_update(p->br); |
| 293 | br_port_state_selection(p->br); | 294 | br_port_state_selection(p->br); |
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 1ae1d9cb278d..21760f008974 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c | |||
| @@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev) | |||
| 118 | return NULL; | 118 | return NULL; |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | void caif_flow_cb(struct sk_buff *skb) | 121 | static void caif_flow_cb(struct sk_buff *skb) |
| 122 | { | 122 | { |
| 123 | struct caif_device_entry *caifd; | 123 | struct caif_device_entry *caifd; |
| 124 | void (*dtor)(struct sk_buff *skb) = NULL; | 124 | void (*dtor)(struct sk_buff *skb) = NULL; |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f83902..ff2ff3ce6965 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
| @@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 286 | if (m->msg_flags&MSG_OOB) | 286 | if (m->msg_flags&MSG_OOB) |
| 287 | goto read_error; | 287 | goto read_error; |
| 288 | 288 | ||
| 289 | m->msg_namelen = 0; | ||
| 290 | |||
| 289 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 291 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
| 290 | if (!skb) | 292 | if (!skb) |
| 291 | goto read_error; | 293 | goto read_error; |
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 3ebc8cbc91ff..ef8ebaa993cf 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c | |||
| @@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |||
| 81 | layr->up->ctrlcmd(layr->up, ctrl, layr->id); | 81 | layr->up->ctrlcmd(layr->up, ctrl, layr->id); |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], | 84 | static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], |
| 85 | u8 braddr[ETH_ALEN]) | 85 | u8 braddr[ETH_ALEN]) |
| 86 | { | 86 | { |
| 87 | struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); | 87 | struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); |
| 88 | 88 | ||
diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebea..117814a7e73c 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
| @@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb, | |||
| 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
| 467 | hlist_del(&gwj->list); | 467 | hlist_del(&gwj->list); |
| 468 | cgw_unregister_filter(gwj); | 468 | cgw_unregister_filter(gwj); |
| 469 | kfree(gwj); | 469 | kmem_cache_free(cgw_cache, gwj); |
| 470 | } | 470 | } |
| 471 | } | 471 | } |
| 472 | } | 472 | } |
| @@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void) | |||
| 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { | 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
| 865 | hlist_del(&gwj->list); | 865 | hlist_del(&gwj->list); |
| 866 | cgw_unregister_filter(gwj); | 866 | cgw_unregister_filter(gwj); |
| 867 | kfree(gwj); | 867 | kmem_cache_free(cgw_cache, gwj); |
| 868 | } | 868 | } |
| 869 | } | 869 | } |
| 870 | 870 | ||
| @@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
| 920 | 920 | ||
| 921 | hlist_del(&gwj->list); | 921 | hlist_del(&gwj->list); |
| 922 | cgw_unregister_filter(gwj); | 922 | cgw_unregister_filter(gwj); |
| 923 | kfree(gwj); | 923 | kmem_cache_free(cgw_cache, gwj); |
| 924 | err = 0; | 924 | err = 0; |
| 925 | break; | 925 | break; |
| 926 | } | 926 | } |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 69bc4bf89e3e..4543b9aba40c 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
| @@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) | |||
| 654 | return 0; | 654 | return 0; |
| 655 | } | 655 | } |
| 656 | 656 | ||
| 657 | static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) | ||
| 658 | { | ||
| 659 | u8 v; | ||
| 660 | |||
| 661 | ceph_decode_need(p, end, 1+8+4+4, bad); | ||
| 662 | v = ceph_decode_8(p); | ||
| 663 | if (v != 1) | ||
| 664 | goto bad; | ||
| 665 | pg->pool = ceph_decode_64(p); | ||
| 666 | pg->seed = ceph_decode_32(p); | ||
| 667 | *p += 4; /* skip preferred */ | ||
| 668 | return 0; | ||
| 669 | |||
| 670 | bad: | ||
| 671 | dout("error decoding pgid\n"); | ||
| 672 | return -EINVAL; | ||
| 673 | } | ||
| 674 | |||
| 657 | /* | 675 | /* |
| 658 | * decode a full map. | 676 | * decode a full map. |
| 659 | */ | 677 | */ |
| @@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
| 745 | for (i = 0; i < len; i++) { | 763 | for (i = 0; i < len; i++) { |
| 746 | int n, j; | 764 | int n, j; |
| 747 | struct ceph_pg pgid; | 765 | struct ceph_pg pgid; |
| 748 | struct ceph_pg_v1 pgid_v1; | ||
| 749 | struct ceph_pg_mapping *pg; | 766 | struct ceph_pg_mapping *pg; |
| 750 | 767 | ||
| 751 | ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); | 768 | err = __decode_pgid(p, end, &pgid); |
| 752 | ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); | 769 | if (err) |
| 753 | pgid.pool = le32_to_cpu(pgid_v1.pool); | 770 | goto bad; |
| 754 | pgid.seed = le16_to_cpu(pgid_v1.ps); | 771 | ceph_decode_need(p, end, sizeof(u32), bad); |
| 755 | n = ceph_decode_32(p); | 772 | n = ceph_decode_32(p); |
| 756 | err = -EINVAL; | 773 | err = -EINVAL; |
| 757 | if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) | 774 | if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) |
| @@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 818 | u16 version; | 835 | u16 version; |
| 819 | 836 | ||
| 820 | ceph_decode_16_safe(p, end, version, bad); | 837 | ceph_decode_16_safe(p, end, version, bad); |
| 821 | if (version > 6) { | 838 | if (version != 6) { |
| 822 | pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); | 839 | pr_warning("got unknown v %d != 6 of inc osdmap\n", version); |
| 823 | goto bad; | 840 | goto bad; |
| 824 | } | 841 | } |
| 825 | 842 | ||
| @@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
| 963 | while (len--) { | 980 | while (len--) { |
| 964 | struct ceph_pg_mapping *pg; | 981 | struct ceph_pg_mapping *pg; |
| 965 | int j; | 982 | int j; |
| 966 | struct ceph_pg_v1 pgid_v1; | ||
| 967 | struct ceph_pg pgid; | 983 | struct ceph_pg pgid; |
| 968 | u32 pglen; | 984 | u32 pglen; |
| 969 | ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); | ||
| 970 | ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); | ||
| 971 | pgid.pool = le32_to_cpu(pgid_v1.pool); | ||
| 972 | pgid.seed = le16_to_cpu(pgid_v1.ps); | ||
| 973 | pglen = ceph_decode_32(p); | ||
| 974 | 985 | ||
| 986 | err = __decode_pgid(p, end, &pgid); | ||
| 987 | if (err) | ||
| 988 | goto bad; | ||
| 989 | ceph_decode_need(p, end, sizeof(u32), bad); | ||
| 990 | pglen = ceph_decode_32(p); | ||
| 975 | if (pglen) { | 991 | if (pglen) { |
| 976 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); | 992 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); |
| 977 | 993 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd11..b24ab0e98eb4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -1545,7 +1545,6 @@ void net_enable_timestamp(void) | |||
| 1545 | return; | 1545 | return; |
| 1546 | } | 1546 | } |
| 1547 | #endif | 1547 | #endif |
| 1548 | WARN_ON(in_interrupt()); | ||
| 1549 | static_key_slow_inc(&netstamp_needed); | 1548 | static_key_slow_inc(&netstamp_needed); |
| 1550 | } | 1549 | } |
| 1551 | EXPORT_SYMBOL(net_enable_timestamp); | 1550 | EXPORT_SYMBOL(net_enable_timestamp); |
| @@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 1625 | } | 1624 | } |
| 1626 | 1625 | ||
| 1627 | skb_orphan(skb); | 1626 | skb_orphan(skb); |
| 1628 | nf_reset(skb); | ||
| 1629 | 1627 | ||
| 1630 | if (unlikely(!is_skb_forwardable(dev, skb))) { | 1628 | if (unlikely(!is_skb_forwardable(dev, skb))) { |
| 1631 | atomic_long_inc(&dev->rx_dropped); | 1629 | atomic_long_inc(&dev->rx_dropped); |
| @@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
| 1641 | skb->mark = 0; | 1639 | skb->mark = 0; |
| 1642 | secpath_reset(skb); | 1640 | secpath_reset(skb); |
| 1643 | nf_reset(skb); | 1641 | nf_reset(skb); |
| 1642 | nf_reset_trace(skb); | ||
| 1644 | return netif_rx(skb); | 1643 | return netif_rx(skb); |
| 1645 | } | 1644 | } |
| 1646 | EXPORT_SYMBOL_GPL(dev_forward_skb); | 1645 | EXPORT_SYMBOL_GPL(dev_forward_skb); |
| @@ -2149,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) | |||
| 2149 | struct net_device *dev = skb->dev; | 2148 | struct net_device *dev = skb->dev; |
| 2150 | const char *driver = ""; | 2149 | const char *driver = ""; |
| 2151 | 2150 | ||
| 2151 | if (!net_ratelimit()) | ||
| 2152 | return; | ||
| 2153 | |||
| 2152 | if (dev && dev->dev.parent) | 2154 | if (dev && dev->dev.parent) |
| 2153 | driver = dev_driver_string(dev->dev.parent); | 2155 | driver = dev_driver_string(dev->dev.parent); |
| 2154 | 2156 | ||
| @@ -2219,9 +2221,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |||
| 2219 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2221 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
| 2220 | struct packet_offload *ptype; | 2222 | struct packet_offload *ptype; |
| 2221 | __be16 type = skb->protocol; | 2223 | __be16 type = skb->protocol; |
| 2224 | int vlan_depth = ETH_HLEN; | ||
| 2222 | 2225 | ||
| 2223 | while (type == htons(ETH_P_8021Q)) { | 2226 | while (type == htons(ETH_P_8021Q)) { |
| 2224 | int vlan_depth = ETH_HLEN; | ||
| 2225 | struct vlan_hdr *vh; | 2227 | struct vlan_hdr *vh; |
| 2226 | 2228 | ||
| 2227 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) | 2229 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) |
| @@ -3315,6 +3317,7 @@ int netdev_rx_handler_register(struct net_device *dev, | |||
| 3315 | if (dev->rx_handler) | 3317 | if (dev->rx_handler) |
| 3316 | return -EBUSY; | 3318 | return -EBUSY; |
| 3317 | 3319 | ||
| 3320 | /* Note: rx_handler_data must be set before rx_handler */ | ||
| 3318 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); | 3321 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); |
| 3319 | rcu_assign_pointer(dev->rx_handler, rx_handler); | 3322 | rcu_assign_pointer(dev->rx_handler, rx_handler); |
| 3320 | 3323 | ||
| @@ -3335,6 +3338,11 @@ void netdev_rx_handler_unregister(struct net_device *dev) | |||
| 3335 | 3338 | ||
| 3336 | ASSERT_RTNL(); | 3339 | ASSERT_RTNL(); |
| 3337 | RCU_INIT_POINTER(dev->rx_handler, NULL); | 3340 | RCU_INIT_POINTER(dev->rx_handler, NULL); |
| 3341 | /* a reader seeing a non NULL rx_handler in a rcu_read_lock() | ||
| 3342 | * section has a guarantee to see a non NULL rx_handler_data | ||
| 3343 | * as well. | ||
| 3344 | */ | ||
| 3345 | synchronize_net(); | ||
| 3338 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); | 3346 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); |
| 3339 | } | 3347 | } |
| 3340 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | 3348 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); |
| @@ -3444,6 +3452,7 @@ ncls: | |||
| 3444 | } | 3452 | } |
| 3445 | switch (rx_handler(&skb)) { | 3453 | switch (rx_handler(&skb)) { |
| 3446 | case RX_HANDLER_CONSUMED: | 3454 | case RX_HANDLER_CONSUMED: |
| 3455 | ret = NET_RX_SUCCESS; | ||
| 3447 | goto unlock; | 3456 | goto unlock; |
| 3448 | case RX_HANDLER_ANOTHER: | 3457 | case RX_HANDLER_ANOTHER: |
| 3449 | goto another_round; | 3458 | goto another_round; |
| @@ -4103,7 +4112,7 @@ static void net_rx_action(struct softirq_action *h) | |||
| 4103 | * Allow this to run for 2 jiffies since which will allow | 4112 | * Allow this to run for 2 jiffies since which will allow |
| 4104 | * an average latency of 1.5/HZ. | 4113 | * an average latency of 1.5/HZ. |
| 4105 | */ | 4114 | */ |
| 4106 | if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) | 4115 | if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) |
| 4107 | goto softnet_break; | 4116 | goto softnet_break; |
| 4108 | 4117 | ||
| 4109 | local_irq_enable(); | 4118 | local_irq_enable(); |
| @@ -4780,7 +4789,7 @@ EXPORT_SYMBOL(dev_set_mac_address); | |||
| 4780 | /** | 4789 | /** |
| 4781 | * dev_change_carrier - Change device carrier | 4790 | * dev_change_carrier - Change device carrier |
| 4782 | * @dev: device | 4791 | * @dev: device |
| 4783 | * @new_carries: new value | 4792 | * @new_carrier: new value |
| 4784 | * | 4793 | * |
| 4785 | * Change device carrier | 4794 | * Change device carrier |
| 4786 | */ | 4795 | */ |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e369..abdc9e6ef33e 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
| @@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, | |||
| 37 | ha->type = addr_type; | 37 | ha->type = addr_type; |
| 38 | ha->refcount = 1; | 38 | ha->refcount = 1; |
| 39 | ha->global_use = global; | 39 | ha->global_use = global; |
| 40 | ha->synced = false; | 40 | ha->synced = 0; |
| 41 | list_add_tail_rcu(&ha->list, &list->list); | 41 | list_add_tail_rcu(&ha->list, &list->list); |
| 42 | list->count++; | 42 | list->count++; |
| 43 | 43 | ||
| @@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | |||
| 165 | addr_len, ha->type); | 165 | addr_len, ha->type); |
| 166 | if (err) | 166 | if (err) |
| 167 | break; | 167 | break; |
| 168 | ha->synced = true; | 168 | ha->synced++; |
| 169 | ha->refcount++; | 169 | ha->refcount++; |
| 170 | } else if (ha->refcount == 1) { | 170 | } else if (ha->refcount == 1) { |
| 171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | 171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); |
| @@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |||
| 186 | if (ha->synced) { | 186 | if (ha->synced) { |
| 187 | __hw_addr_del(to_list, ha->addr, | 187 | __hw_addr_del(to_list, ha->addr, |
| 188 | addr_len, ha->type); | 188 | addr_len, ha->type); |
| 189 | ha->synced = false; | 189 | ha->synced--; |
| 190 | __hw_addr_del(from_list, ha->addr, | 190 | __hw_addr_del(from_list, ha->addr, |
| 191 | addr_len, ha->type); | 191 | addr_len, ha->type); |
| 192 | } | 192 | } |
diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c7..2bfd081c59f7 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
| @@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
| 328 | struct flow_flush_info *info = data; | 328 | struct flow_flush_info *info = data; |
| 329 | struct tasklet_struct *tasklet; | 329 | struct tasklet_struct *tasklet; |
| 330 | 330 | ||
| 331 | tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); | 331 | tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; |
| 332 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
| 333 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
| 334 | } | 334 | } |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9d4c7201400d..e187bf06d673 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
| @@ -140,6 +140,8 @@ ipv6: | |||
| 140 | flow->ports = *ports; | 140 | flow->ports = *ports; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | flow->thoff = (u16) nhoff; | ||
| 144 | |||
| 143 | return true; | 145 | return true; |
| 144 | } | 146 | } |
| 145 | EXPORT_SYMBOL(skb_flow_dissect); | 147 | EXPORT_SYMBOL(skb_flow_dissect); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff259..23854b51a259 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) | |||
| 496 | } | 496 | } |
| 497 | if (ops->fill_info) { | 497 | if (ops->fill_info) { |
| 498 | data = nla_nest_start(skb, IFLA_INFO_DATA); | 498 | data = nla_nest_start(skb, IFLA_INFO_DATA); |
| 499 | if (data == NULL) | 499 | if (data == NULL) { |
| 500 | err = -EMSGSIZE; | ||
| 500 | goto err_cancel_link; | 501 | goto err_cancel_link; |
| 502 | } | ||
| 501 | err = ops->fill_info(skb, dev); | 503 | err = ops->fill_info(skb, dev); |
| 502 | if (err < 0) | 504 | if (err < 0) |
| 503 | goto err_cancel_data; | 505 | goto err_cancel_data; |
| @@ -979,6 +981,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 979 | * report anything. | 981 | * report anything. |
| 980 | */ | 982 | */ |
| 981 | ivi.spoofchk = -1; | 983 | ivi.spoofchk = -1; |
| 984 | memset(ivi.mac, 0, sizeof(ivi.mac)); | ||
| 982 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) | 985 | if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) |
| 983 | break; | 986 | break; |
| 984 | vf_mac.vf = | 987 | vf_mac.vf = |
| @@ -1069,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1069 | rcu_read_lock(); | 1072 | rcu_read_lock(); |
| 1070 | cb->seq = net->dev_base_seq; | 1073 | cb->seq = net->dev_base_seq; |
| 1071 | 1074 | ||
| 1072 | if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1075 | if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
| 1073 | ifla_policy) >= 0) { | 1076 | ifla_policy) >= 0) { |
| 1074 | 1077 | ||
| 1075 | if (tb[IFLA_EXT_MASK]) | 1078 | if (tb[IFLA_EXT_MASK]) |
| @@ -1919,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 1919 | u32 ext_filter_mask = 0; | 1922 | u32 ext_filter_mask = 0; |
| 1920 | u16 min_ifinfo_dump_size = 0; | 1923 | u16 min_ifinfo_dump_size = 0; |
| 1921 | 1924 | ||
| 1922 | if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1925 | if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
| 1923 | ifla_policy) >= 0) { | 1926 | ifla_policy) >= 0) { |
| 1924 | if (tb[IFLA_EXT_MASK]) | 1927 | if (tb[IFLA_EXT_MASK]) |
| 1925 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); | 1928 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); |
| @@ -2620,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 2620 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); | 2623 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); |
| 2621 | 2624 | ||
| 2622 | while (RTA_OK(attr, attrlen)) { | 2625 | while (RTA_OK(attr, attrlen)) { |
| 2623 | unsigned int flavor = attr->rta_type; | 2626 | unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; |
| 2624 | if (flavor) { | 2627 | if (flavor) { |
| 2625 | if (flavor > rta_max[sz_idx]) | 2628 | if (flavor > rta_max[sz_idx]) |
| 2626 | return -EINVAL; | 2629 | return -EINVAL; |
diff --git a/net/core/scm.c b/net/core/scm.c index 905dcc6ad1e3..2dc6cdaaae8a 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
| 25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
| 26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
| 27 | #include <linux/pid_namespace.h> | ||
| 27 | #include <linux/pid.h> | 28 | #include <linux/pid.h> |
| 28 | #include <linux/nsproxy.h> | 29 | #include <linux/nsproxy.h> |
| 29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| @@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds) | |||
| 52 | if (!uid_valid(uid) || !gid_valid(gid)) | 53 | if (!uid_valid(uid) || !gid_valid(gid)) |
| 53 | return -EINVAL; | 54 | return -EINVAL; |
| 54 | 55 | ||
| 55 | if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && | 56 | if ((creds->pid == task_tgid_vnr(current) || |
| 57 | ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && | ||
| 56 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || | 58 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || |
| 57 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && | 59 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && |
| 58 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || | 60 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf80..21291f1abcd6 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
| @@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, | |||
| 284 | if (!netdev->dcbnl_ops->getpermhwaddr) | 284 | if (!netdev->dcbnl_ops->getpermhwaddr) |
| 285 | return -EOPNOTSUPP; | 285 | return -EOPNOTSUPP; |
| 286 | 286 | ||
| 287 | memset(perm_addr, 0, sizeof(perm_addr)); | ||
| 287 | netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); | 288 | netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); |
| 288 | 289 | ||
| 289 | return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); | 290 | return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); |
| @@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1042 | 1043 | ||
| 1043 | if (ops->ieee_getets) { | 1044 | if (ops->ieee_getets) { |
| 1044 | struct ieee_ets ets; | 1045 | struct ieee_ets ets; |
| 1046 | memset(&ets, 0, sizeof(ets)); | ||
| 1045 | err = ops->ieee_getets(netdev, &ets); | 1047 | err = ops->ieee_getets(netdev, &ets); |
| 1046 | if (!err && | 1048 | if (!err && |
| 1047 | nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) | 1049 | nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) |
| @@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1050 | 1052 | ||
| 1051 | if (ops->ieee_getmaxrate) { | 1053 | if (ops->ieee_getmaxrate) { |
| 1052 | struct ieee_maxrate maxrate; | 1054 | struct ieee_maxrate maxrate; |
| 1055 | memset(&maxrate, 0, sizeof(maxrate)); | ||
| 1053 | err = ops->ieee_getmaxrate(netdev, &maxrate); | 1056 | err = ops->ieee_getmaxrate(netdev, &maxrate); |
| 1054 | if (!err) { | 1057 | if (!err) { |
| 1055 | err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, | 1058 | err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, |
| @@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1061 | 1064 | ||
| 1062 | if (ops->ieee_getpfc) { | 1065 | if (ops->ieee_getpfc) { |
| 1063 | struct ieee_pfc pfc; | 1066 | struct ieee_pfc pfc; |
| 1067 | memset(&pfc, 0, sizeof(pfc)); | ||
| 1064 | err = ops->ieee_getpfc(netdev, &pfc); | 1068 | err = ops->ieee_getpfc(netdev, &pfc); |
| 1065 | if (!err && | 1069 | if (!err && |
| 1066 | nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) | 1070 | nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) |
| @@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1094 | /* get peer info if available */ | 1098 | /* get peer info if available */ |
| 1095 | if (ops->ieee_peer_getets) { | 1099 | if (ops->ieee_peer_getets) { |
| 1096 | struct ieee_ets ets; | 1100 | struct ieee_ets ets; |
| 1101 | memset(&ets, 0, sizeof(ets)); | ||
| 1097 | err = ops->ieee_peer_getets(netdev, &ets); | 1102 | err = ops->ieee_peer_getets(netdev, &ets); |
| 1098 | if (!err && | 1103 | if (!err && |
| 1099 | nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) | 1104 | nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) |
| @@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1102 | 1107 | ||
| 1103 | if (ops->ieee_peer_getpfc) { | 1108 | if (ops->ieee_peer_getpfc) { |
| 1104 | struct ieee_pfc pfc; | 1109 | struct ieee_pfc pfc; |
| 1110 | memset(&pfc, 0, sizeof(pfc)); | ||
| 1105 | err = ops->ieee_peer_getpfc(netdev, &pfc); | 1111 | err = ops->ieee_peer_getpfc(netdev, &pfc); |
| 1106 | if (!err && | 1112 | if (!err && |
| 1107 | nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) | 1113 | nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) |
| @@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1280 | /* peer info if available */ | 1286 | /* peer info if available */ |
| 1281 | if (ops->cee_peer_getpg) { | 1287 | if (ops->cee_peer_getpg) { |
| 1282 | struct cee_pg pg; | 1288 | struct cee_pg pg; |
| 1289 | memset(&pg, 0, sizeof(pg)); | ||
| 1283 | err = ops->cee_peer_getpg(netdev, &pg); | 1290 | err = ops->cee_peer_getpg(netdev, &pg); |
| 1284 | if (!err && | 1291 | if (!err && |
| 1285 | nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) | 1292 | nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) |
| @@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) | |||
| 1288 | 1295 | ||
| 1289 | if (ops->cee_peer_getpfc) { | 1296 | if (ops->cee_peer_getpfc) { |
| 1290 | struct cee_pfc pfc; | 1297 | struct cee_pfc pfc; |
| 1298 | memset(&pfc, 0, sizeof(pfc)); | ||
| 1291 | err = ops->cee_peer_getpfc(netdev, &pfc); | 1299 | err = ops->cee_peer_getpfc(netdev, &pfc); |
| 1292 | if (!err && | 1300 | if (!err && |
| 1293 | nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) | 1301 | nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) |
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3f..bba5f8336317 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h | |||
| @@ -84,7 +84,7 @@ | |||
| 84 | (memcmp(addr1, addr2, length >> 3) == 0) | 84 | (memcmp(addr1, addr2, length >> 3) == 0) |
| 85 | 85 | ||
| 86 | /* local link, i.e. FE80::/10 */ | 86 | /* local link, i.e. FE80::/10 */ |
| 87 | #define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) | 87 | #define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80)) |
| 88 | 88 | ||
| 89 | /* | 89 | /* |
| 90 | * check whether we can compress the IID to 16 bits, | 90 | * check whether we can compress the IID to 16 bits, |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f7661..c929d9c1c4b6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
| 1333 | iph->frag_off |= htons(IP_MF); | 1333 | iph->frag_off |= htons(IP_MF); |
| 1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); | 1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); |
| 1335 | } else { | 1335 | } else { |
| 1336 | if (!(iph->frag_off & htons(IP_DF))) | 1336 | iph->id = htons(id++); |
| 1337 | iph->id = htons(id++); | ||
| 1338 | } | 1337 | } |
| 1339 | iph->tot_len = htons(skb->len - skb->mac_len); | 1338 | iph->tot_len = htons(skb->len - skb->mac_len); |
| 1340 | iph->check = 0; | 1339 | iph->check = 0; |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc829..c6287cd978c2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work) | |||
| 587 | { | 587 | { |
| 588 | unsigned long now, next, next_sec, next_sched; | 588 | unsigned long now, next, next_sec, next_sched; |
| 589 | struct in_ifaddr *ifa; | 589 | struct in_ifaddr *ifa; |
| 590 | struct hlist_node *n; | ||
| 590 | int i; | 591 | int i; |
| 591 | 592 | ||
| 592 | now = jiffies; | 593 | now = jiffies; |
| 593 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); | 594 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
| 594 | 595 | ||
| 595 | rcu_read_lock(); | ||
| 596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { | 596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { |
| 597 | bool change_needed = false; | ||
| 598 | |||
| 599 | rcu_read_lock(); | ||
| 597 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { | 600 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { |
| 598 | unsigned long age; | 601 | unsigned long age; |
| 599 | 602 | ||
| @@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work) | |||
| 606 | 609 | ||
| 607 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | 610 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && |
| 608 | age >= ifa->ifa_valid_lft) { | 611 | age >= ifa->ifa_valid_lft) { |
| 609 | struct in_ifaddr **ifap ; | 612 | change_needed = true; |
| 610 | |||
| 611 | rtnl_lock(); | ||
| 612 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
| 613 | *ifap != NULL; ifap = &ifa->ifa_next) { | ||
| 614 | if (*ifap == ifa) | ||
| 615 | inet_del_ifa(ifa->ifa_dev, | ||
| 616 | ifap, 1); | ||
| 617 | } | ||
| 618 | rtnl_unlock(); | ||
| 619 | } else if (ifa->ifa_preferred_lft == | 613 | } else if (ifa->ifa_preferred_lft == |
| 620 | INFINITY_LIFE_TIME) { | 614 | INFINITY_LIFE_TIME) { |
| 621 | continue; | 615 | continue; |
| @@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work) | |||
| 625 | next = ifa->ifa_tstamp + | 619 | next = ifa->ifa_tstamp + |
| 626 | ifa->ifa_valid_lft * HZ; | 620 | ifa->ifa_valid_lft * HZ; |
| 627 | 621 | ||
| 628 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { | 622 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) |
| 629 | ifa->ifa_flags |= IFA_F_DEPRECATED; | 623 | change_needed = true; |
| 630 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
| 631 | } | ||
| 632 | } else if (time_before(ifa->ifa_tstamp + | 624 | } else if (time_before(ifa->ifa_tstamp + |
| 633 | ifa->ifa_preferred_lft * HZ, | 625 | ifa->ifa_preferred_lft * HZ, |
| 634 | next)) { | 626 | next)) { |
| @@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work) | |||
| 636 | ifa->ifa_preferred_lft * HZ; | 628 | ifa->ifa_preferred_lft * HZ; |
| 637 | } | 629 | } |
| 638 | } | 630 | } |
| 631 | rcu_read_unlock(); | ||
| 632 | if (!change_needed) | ||
| 633 | continue; | ||
| 634 | rtnl_lock(); | ||
| 635 | hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { | ||
| 636 | unsigned long age; | ||
| 637 | |||
| 638 | if (ifa->ifa_flags & IFA_F_PERMANENT) | ||
| 639 | continue; | ||
| 640 | |||
| 641 | /* We try to batch several events at once. */ | ||
| 642 | age = (now - ifa->ifa_tstamp + | ||
| 643 | ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | ||
| 644 | |||
| 645 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | ||
| 646 | age >= ifa->ifa_valid_lft) { | ||
| 647 | struct in_ifaddr **ifap; | ||
| 648 | |||
| 649 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
| 650 | *ifap != NULL; ifap = &(*ifap)->ifa_next) { | ||
| 651 | if (*ifap == ifa) { | ||
| 652 | inet_del_ifa(ifa->ifa_dev, | ||
| 653 | ifap, 1); | ||
| 654 | break; | ||
| 655 | } | ||
| 656 | } | ||
| 657 | } else if (ifa->ifa_preferred_lft != | ||
| 658 | INFINITY_LIFE_TIME && | ||
| 659 | age >= ifa->ifa_preferred_lft && | ||
| 660 | !(ifa->ifa_flags & IFA_F_DEPRECATED)) { | ||
| 661 | ifa->ifa_flags |= IFA_F_DEPRECATED; | ||
| 662 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
| 663 | } | ||
| 664 | } | ||
| 665 | rtnl_unlock(); | ||
| 639 | } | 666 | } |
| 640 | rcu_read_unlock(); | ||
| 641 | 667 | ||
| 642 | next_sec = round_jiffies_up(next); | 668 | next_sec = round_jiffies_up(next); |
| 643 | next_sched = next; | 669 | next_sched = next; |
| @@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
| 802 | if (nlh->nlmsg_flags & NLM_F_EXCL || | 828 | if (nlh->nlmsg_flags & NLM_F_EXCL || |
| 803 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) | 829 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) |
| 804 | return -EEXIST; | 830 | return -EEXIST; |
| 805 | 831 | ifa = ifa_existing; | |
| 806 | set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); | 832 | set_ifa_lifetime(ifa, valid_lft, prefered_lft); |
| 833 | cancel_delayed_work(&check_lifetime_work); | ||
| 834 | schedule_delayed_work(&check_lifetime_work, 0); | ||
| 835 | rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); | ||
| 836 | blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); | ||
| 807 | } | 837 | } |
| 808 | return 0; | 838 | return 0; |
| 809 | } | 839 | } |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3b4f0cd2e63e..4cfe34d4cc96 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
| @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
| 139 | 139 | ||
| 140 | /* skb is pure payload to encrypt */ | 140 | /* skb is pure payload to encrypt */ |
| 141 | 141 | ||
| 142 | err = -ENOMEM; | ||
| 143 | |||
| 144 | esp = x->data; | 142 | esp = x->data; |
| 145 | aead = esp->aead; | 143 | aead = esp->aead; |
| 146 | alen = crypto_aead_authsize(aead); | 144 | alen = crypto_aead_authsize(aead); |
| @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
| 176 | } | 174 | } |
| 177 | 175 | ||
| 178 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 176 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); |
| 179 | if (!tmp) | 177 | if (!tmp) { |
| 178 | err = -ENOMEM; | ||
| 180 | goto error; | 179 | goto error; |
| 180 | } | ||
| 181 | 181 | ||
| 182 | seqhi = esp_tmp_seqhi(tmp); | 182 | seqhi = esp_tmp_seqhi(tmp); |
| 183 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 183 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df3..786d97aee751 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
| @@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock); | |||
| 735 | * tcp/dccp_create_openreq_child(). | 735 | * tcp/dccp_create_openreq_child(). |
| 736 | */ | 736 | */ |
| 737 | void inet_csk_prepare_forced_close(struct sock *sk) | 737 | void inet_csk_prepare_forced_close(struct sock *sk) |
| 738 | __releases(&sk->sk_lock.slock) | ||
| 738 | { | 739 | { |
| 739 | /* sk_clone_lock locked the socket and set refcnt to 2 */ | 740 | /* sk_clone_lock locked the socket and set refcnt to 2 */ |
| 740 | bh_unlock_sock(sk); | 741 | bh_unlock_sock(sk); |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 245ae078a07f..f4fd23de9b13 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | 23 | ||
| 24 | #include <net/sock.h> | ||
| 24 | #include <net/inet_frag.h> | 25 | #include <net/inet_frag.h> |
| 25 | 26 | ||
| 26 | static void inet_frag_secret_rebuild(unsigned long dummy) | 27 | static void inet_frag_secret_rebuild(unsigned long dummy) |
| @@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
| 277 | __releases(&f->lock) | 278 | __releases(&f->lock) |
| 278 | { | 279 | { |
| 279 | struct inet_frag_queue *q; | 280 | struct inet_frag_queue *q; |
| 281 | int depth = 0; | ||
| 280 | 282 | ||
| 281 | hlist_for_each_entry(q, &f->hash[hash], list) { | 283 | hlist_for_each_entry(q, &f->hash[hash], list) { |
| 282 | if (q->net == nf && f->match(q, key)) { | 284 | if (q->net == nf && f->match(q, key)) { |
| @@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
| 284 | read_unlock(&f->lock); | 286 | read_unlock(&f->lock); |
| 285 | return q; | 287 | return q; |
| 286 | } | 288 | } |
| 289 | depth++; | ||
| 287 | } | 290 | } |
| 288 | read_unlock(&f->lock); | 291 | read_unlock(&f->lock); |
| 289 | 292 | ||
| 290 | return inet_frag_create(nf, f, key); | 293 | if (depth <= INETFRAGS_MAXDEPTH) |
| 294 | return inet_frag_create(nf, f, key); | ||
| 295 | else | ||
| 296 | return ERR_PTR(-ENOBUFS); | ||
| 291 | } | 297 | } |
| 292 | EXPORT_SYMBOL(inet_frag_find); | 298 | EXPORT_SYMBOL(inet_frag_find); |
| 299 | |||
| 300 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, | ||
| 301 | const char *prefix) | ||
| 302 | { | ||
| 303 | static const char msg[] = "inet_frag_find: Fragment hash bucket" | ||
| 304 | " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) | ||
| 305 | ". Dropping fragment.\n"; | ||
| 306 | |||
| 307 | if (PTR_ERR(q) == -ENOBUFS) | ||
| 308 | LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); | ||
| 309 | } | ||
| 310 | EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6d30acb600c..52c273ea05c3 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
| @@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg) | |||
| 248 | if (!head->dev) | 248 | if (!head->dev) |
| 249 | goto out_rcu_unlock; | 249 | goto out_rcu_unlock; |
| 250 | 250 | ||
| 251 | /* skb dst is stale, drop it, and perform route lookup again */ | 251 | /* skb has no dst, perform route lookup again */ |
| 252 | skb_dst_drop(head); | ||
| 253 | iph = ip_hdr(head); | 252 | iph = ip_hdr(head); |
| 254 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | 253 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, |
| 255 | iph->tos, head->dev); | 254 | iph->tos, head->dev); |
| @@ -292,14 +291,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) | |||
| 292 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); | 291 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); |
| 293 | 292 | ||
| 294 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); | 293 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); |
| 295 | if (q == NULL) | 294 | if (IS_ERR_OR_NULL(q)) { |
| 296 | goto out_nomem; | 295 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
| 297 | 296 | return NULL; | |
| 297 | } | ||
| 298 | return container_of(q, struct ipq, q); | 298 | return container_of(q, struct ipq, q); |
| 299 | |||
| 300 | out_nomem: | ||
| 301 | LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); | ||
| 302 | return NULL; | ||
| 303 | } | 299 | } |
| 304 | 300 | ||
| 305 | /* Is the fragment too far ahead to be part of ipq? */ | 301 | /* Is the fragment too far ahead to be part of ipq? */ |
| @@ -526,9 +522,16 @@ found: | |||
| 526 | qp->q.max_size = skb->len + ihl; | 522 | qp->q.max_size = skb->len + ihl; |
| 527 | 523 | ||
| 528 | if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && | 524 | if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
| 529 | qp->q.meat == qp->q.len) | 525 | qp->q.meat == qp->q.len) { |
| 530 | return ip_frag_reasm(qp, prev, dev); | 526 | unsigned long orefdst = skb->_skb_refdst; |
| 527 | |||
| 528 | skb->_skb_refdst = 0UL; | ||
| 529 | err = ip_frag_reasm(qp, prev, dev); | ||
| 530 | skb->_skb_refdst = orefdst; | ||
| 531 | return err; | ||
| 532 | } | ||
| 531 | 533 | ||
| 534 | skb_dst_drop(skb); | ||
| 532 | inet_frag_lru_move(&qp->q); | 535 | inet_frag_lru_move(&qp->q); |
| 533 | return -EINPROGRESS; | 536 | return -EINPROGRESS; |
| 534 | 537 | ||
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec5..91d66dbde9c0 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
| 798 | 798 | ||
| 799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { | 799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { |
| 800 | gre_hlen = 0; | 800 | gre_hlen = 0; |
| 801 | if (skb->protocol == htons(ETH_P_IP)) | 801 | tiph = (const struct iphdr *)skb->data; |
| 802 | tiph = (const struct iphdr *)skb->data; | ||
| 803 | else | ||
| 804 | tiph = &tunnel->parms.iph; | ||
| 805 | } else { | 802 | } else { |
| 806 | gre_hlen = tunnel->hlen; | 803 | gre_hlen = tunnel->hlen; |
| 807 | tiph = &tunnel->parms.iph; | 804 | tiph = &tunnel->parms.iph; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 87abd3e2bd32..2bdf802e28e2 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb) | |||
| 228 | icmp_send(skb, ICMP_DEST_UNREACH, | 228 | icmp_send(skb, ICMP_DEST_UNREACH, |
| 229 | ICMP_PROT_UNREACH, 0); | 229 | ICMP_PROT_UNREACH, 0); |
| 230 | } | 230 | } |
| 231 | } else | 231 | kfree_skb(skb); |
| 232 | } else { | ||
| 232 | IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); | 233 | IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); |
| 233 | kfree_skb(skb); | 234 | consume_skb(skb); |
| 235 | } | ||
| 234 | } | 236 | } |
| 235 | } | 237 | } |
| 236 | out: | 238 | out: |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f332..ec7264514a82 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
| @@ -370,7 +370,6 @@ int ip_options_compile(struct net *net, | |||
| 370 | } | 370 | } |
| 371 | switch (optptr[3]&0xF) { | 371 | switch (optptr[3]&0xF) { |
| 372 | case IPOPT_TS_TSONLY: | 372 | case IPOPT_TS_TSONLY: |
| 373 | opt->ts = optptr - iph; | ||
| 374 | if (skb) | 373 | if (skb) |
| 375 | timeptr = &optptr[optptr[2]-1]; | 374 | timeptr = &optptr[optptr[2]-1]; |
| 376 | opt->ts_needtime = 1; | 375 | opt->ts_needtime = 1; |
| @@ -381,7 +380,6 @@ int ip_options_compile(struct net *net, | |||
| 381 | pp_ptr = optptr + 2; | 380 | pp_ptr = optptr + 2; |
| 382 | goto error; | 381 | goto error; |
| 383 | } | 382 | } |
| 384 | opt->ts = optptr - iph; | ||
| 385 | if (rt) { | 383 | if (rt) { |
| 386 | spec_dst_fill(&spec_dst, skb); | 384 | spec_dst_fill(&spec_dst, skb); |
| 387 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); | 385 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); |
| @@ -396,7 +394,6 @@ int ip_options_compile(struct net *net, | |||
| 396 | pp_ptr = optptr + 2; | 394 | pp_ptr = optptr + 2; |
| 397 | goto error; | 395 | goto error; |
| 398 | } | 396 | } |
| 399 | opt->ts = optptr - iph; | ||
| 400 | { | 397 | { |
| 401 | __be32 addr; | 398 | __be32 addr; |
| 402 | memcpy(&addr, &optptr[optptr[2]-1], 4); | 399 | memcpy(&addr, &optptr[optptr[2]-1], 4); |
| @@ -423,18 +420,18 @@ int ip_options_compile(struct net *net, | |||
| 423 | put_unaligned_be32(midtime, timeptr); | 420 | put_unaligned_be32(midtime, timeptr); |
| 424 | opt->is_changed = 1; | 421 | opt->is_changed = 1; |
| 425 | } | 422 | } |
| 426 | } else { | 423 | } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { |
| 427 | unsigned int overflow = optptr[3]>>4; | 424 | unsigned int overflow = optptr[3]>>4; |
| 428 | if (overflow == 15) { | 425 | if (overflow == 15) { |
| 429 | pp_ptr = optptr + 3; | 426 | pp_ptr = optptr + 3; |
| 430 | goto error; | 427 | goto error; |
| 431 | } | 428 | } |
| 432 | opt->ts = optptr - iph; | ||
| 433 | if (skb) { | 429 | if (skb) { |
| 434 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); | 430 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); |
| 435 | opt->is_changed = 1; | 431 | opt->is_changed = 1; |
| 436 | } | 432 | } |
| 437 | } | 433 | } |
| 434 | opt->ts = optptr - iph; | ||
| 438 | break; | 435 | break; |
| 439 | case IPOPT_RA: | 436 | case IPOPT_RA: |
| 440 | if (optlen < 4) { | 437 | if (optlen < 4) { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 98cbc6877019..bf6c5cf31aed 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
| @@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void) | |||
| 1522 | } | 1522 | } |
| 1523 | for (i++; i < CONF_NAMESERVERS_MAX; i++) | 1523 | for (i++; i < CONF_NAMESERVERS_MAX; i++) |
| 1524 | if (ic_nameservers[i] != NONE) | 1524 | if (ic_nameservers[i] != NONE) |
| 1525 | pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); | 1525 | pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]); |
| 1526 | pr_cont("\n"); | ||
| 1526 | #endif /* !SILENT */ | 1527 | #endif /* !SILENT */ |
| 1527 | 1528 | ||
| 1528 | return 0; | 1529 | return 0; |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index ce2d43e1f09f..0d755c50994b 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
| @@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT | |||
| 36 | 36 | ||
| 37 | If unsure, say Y. | 37 | If unsure, say Y. |
| 38 | 38 | ||
| 39 | config IP_NF_QUEUE | ||
| 40 | tristate "IP Userspace queueing via NETLINK (OBSOLETE)" | ||
| 41 | depends on NETFILTER_ADVANCED | ||
| 42 | help | ||
| 43 | Netfilter has the ability to queue packets to user space: the | ||
| 44 | netlink device can be used to access them using this driver. | ||
| 45 | |||
| 46 | This option enables the old IPv4-only "ip_queue" implementation | ||
| 47 | which has been obsoleted by the new "nfnetlink_queue" code (see | ||
| 48 | CONFIG_NETFILTER_NETLINK_QUEUE). | ||
| 49 | |||
| 50 | To compile it as a module, choose M here. If unsure, say N. | ||
| 51 | |||
| 52 | config IP_NF_IPTABLES | 39 | config IP_NF_IPTABLES |
| 53 | tristate "IP tables support (required for filtering/masq/NAT)" | 40 | tristate "IP tables support (required for filtering/masq/NAT)" |
| 54 | default m if NETFILTER_ADVANCED=n | 41 | default m if NETFILTER_ADVANCED=n |
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c30130062cd6..c49dcd0284a0 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
| @@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4, | |||
| 66 | return dev_match; | 66 | return dev_match; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static bool rpfilter_is_local(const struct sk_buff *skb) | ||
| 70 | { | ||
| 71 | const struct rtable *rt = skb_rtable(skb); | ||
| 72 | return rt && (rt->rt_flags & RTCF_LOCAL); | ||
| 73 | } | ||
| 74 | |||
| 69 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 75 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
| 70 | { | 76 | { |
| 71 | const struct xt_rpfilter_info *info; | 77 | const struct xt_rpfilter_info *info; |
| @@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 76 | info = par->matchinfo; | 82 | info = par->matchinfo; |
| 77 | invert = info->flags & XT_RPFILTER_INVERT; | 83 | invert = info->flags & XT_RPFILTER_INVERT; |
| 78 | 84 | ||
| 79 | if (par->in->flags & IFF_LOOPBACK) | 85 | if (rpfilter_is_local(skb)) |
| 80 | return true ^ invert; | 86 | return true ^ invert; |
| 81 | 87 | ||
| 82 | iph = ip_hdr(skb); | 88 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11c..397e0f69435f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
| @@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
| 349 | * hasn't changed since we received the original syn, but I see | 349 | * hasn't changed since we received the original syn, but I see |
| 350 | * no easy way to do this. | 350 | * no easy way to do this. |
| 351 | */ | 351 | */ |
| 352 | flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), | 352 | flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, |
| 353 | RT_SCOPE_UNIVERSE, IPPROTO_TCP, | 353 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, |
| 354 | inet_sk_flowi_flags(sk), | 354 | inet_sk_flowi_flags(sk), |
| 355 | (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, | 355 | (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, |
| 356 | ireq->loc_addr, th->source, th->dest); | 356 | ireq->loc_addr, th->source, th->dest); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae24..e22020790709 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
| 775 | * Make sure that we have exactly size bytes | 775 | * Make sure that we have exactly size bytes |
| 776 | * available to the caller, no more, no less. | 776 | * available to the caller, no more, no less. |
| 777 | */ | 777 | */ |
| 778 | skb->avail_size = size; | 778 | skb->reserved_tailroom = skb->end - skb->tail - size; |
| 779 | return skb; | 779 | return skb; |
| 780 | } | 780 | } |
| 781 | __kfree_skb(skb); | 781 | __kfree_skb(skb); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a759e19496d2..13b9c08fc158 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2; | |||
| 113 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ | 113 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
| 114 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ | 114 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ |
| 115 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ | 115 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
| 116 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ | ||
| 116 | 117 | ||
| 117 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) | 118 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
| 118 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) | 119 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) |
| @@ -2059,11 +2060,8 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
| 2059 | if (tcp_is_reno(tp)) | 2060 | if (tcp_is_reno(tp)) |
| 2060 | tcp_reset_reno_sack(tp); | 2061 | tcp_reset_reno_sack(tp); |
| 2061 | 2062 | ||
| 2062 | if (!how) { | 2063 | tp->undo_marker = tp->snd_una; |
| 2063 | /* Push undo marker, if it was plain RTO and nothing | 2064 | if (how) { |
| 2064 | * was retransmitted. */ | ||
| 2065 | tp->undo_marker = tp->snd_una; | ||
| 2066 | } else { | ||
| 2067 | tp->sacked_out = 0; | 2065 | tp->sacked_out = 0; |
| 2068 | tp->fackets_out = 0; | 2066 | tp->fackets_out = 0; |
| 2069 | } | 2067 | } |
| @@ -3567,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk) | |||
| 3567 | } | 3565 | } |
| 3568 | } | 3566 | } |
| 3569 | 3567 | ||
| 3568 | static void tcp_store_ts_recent(struct tcp_sock *tp) | ||
| 3569 | { | ||
| 3570 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; | ||
| 3571 | tp->rx_opt.ts_recent_stamp = get_seconds(); | ||
| 3572 | } | ||
| 3573 | |||
| 3574 | static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) | ||
| 3575 | { | ||
| 3576 | if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { | ||
| 3577 | /* PAWS bug workaround wrt. ACK frames, the PAWS discard | ||
| 3578 | * extra check below makes sure this can only happen | ||
| 3579 | * for pure ACK frames. -DaveM | ||
| 3580 | * | ||
| 3581 | * Not only, also it occurs for expired timestamps. | ||
| 3582 | */ | ||
| 3583 | |||
| 3584 | if (tcp_paws_check(&tp->rx_opt, 0)) | ||
| 3585 | tcp_store_ts_recent(tp); | ||
| 3586 | } | ||
| 3587 | } | ||
| 3588 | |||
| 3570 | /* This routine deals with incoming acks, but not outgoing ones. */ | 3589 | /* This routine deals with incoming acks, but not outgoing ones. */ |
| 3571 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | 3590 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
| 3572 | { | 3591 | { |
| @@ -3610,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
| 3610 | prior_fackets = tp->fackets_out; | 3629 | prior_fackets = tp->fackets_out; |
| 3611 | prior_in_flight = tcp_packets_in_flight(tp); | 3630 | prior_in_flight = tcp_packets_in_flight(tp); |
| 3612 | 3631 | ||
| 3632 | /* ts_recent update must be made after we are sure that the packet | ||
| 3633 | * is in window. | ||
| 3634 | */ | ||
| 3635 | if (flag & FLAG_UPDATE_TS_RECENT) | ||
| 3636 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
| 3637 | |||
| 3613 | if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { | 3638 | if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { |
| 3614 | /* Window is constant, pure forward advance. | 3639 | /* Window is constant, pure forward advance. |
| 3615 | * No more checks are required. | 3640 | * No more checks are required. |
| @@ -3930,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) | |||
| 3930 | EXPORT_SYMBOL(tcp_parse_md5sig_option); | 3955 | EXPORT_SYMBOL(tcp_parse_md5sig_option); |
| 3931 | #endif | 3956 | #endif |
| 3932 | 3957 | ||
| 3933 | static inline void tcp_store_ts_recent(struct tcp_sock *tp) | ||
| 3934 | { | ||
| 3935 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; | ||
| 3936 | tp->rx_opt.ts_recent_stamp = get_seconds(); | ||
| 3937 | } | ||
| 3938 | |||
| 3939 | static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) | ||
| 3940 | { | ||
| 3941 | if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { | ||
| 3942 | /* PAWS bug workaround wrt. ACK frames, the PAWS discard | ||
| 3943 | * extra check below makes sure this can only happen | ||
| 3944 | * for pure ACK frames. -DaveM | ||
| 3945 | * | ||
| 3946 | * Not only, also it occurs for expired timestamps. | ||
| 3947 | */ | ||
| 3948 | |||
| 3949 | if (tcp_paws_check(&tp->rx_opt, 0)) | ||
| 3950 | tcp_store_ts_recent(tp); | ||
| 3951 | } | ||
| 3952 | } | ||
| 3953 | |||
| 3954 | /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM | 3958 | /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM |
| 3955 | * | 3959 | * |
| 3956 | * It is not fatal. If this ACK does _not_ change critical state (seqs, window) | 3960 | * It is not fatal. If this ACK does _not_ change critical state (seqs, window) |
| @@ -5485,6 +5489,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
| 5485 | if (tcp_checksum_complete_user(sk, skb)) | 5489 | if (tcp_checksum_complete_user(sk, skb)) |
| 5486 | goto csum_error; | 5490 | goto csum_error; |
| 5487 | 5491 | ||
| 5492 | if ((int)skb->truesize > sk->sk_forward_alloc) | ||
| 5493 | goto step5; | ||
| 5494 | |||
| 5488 | /* Predicted packet is in window by definition. | 5495 | /* Predicted packet is in window by definition. |
| 5489 | * seq == rcv_nxt and rcv_wup <= rcv_nxt. | 5496 | * seq == rcv_nxt and rcv_wup <= rcv_nxt. |
| 5490 | * Hence, check seq<=rcv_wup reduces to: | 5497 | * Hence, check seq<=rcv_wup reduces to: |
| @@ -5496,9 +5503,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
| 5496 | 5503 | ||
| 5497 | tcp_rcv_rtt_measure_ts(sk, skb); | 5504 | tcp_rcv_rtt_measure_ts(sk, skb); |
| 5498 | 5505 | ||
| 5499 | if ((int)skb->truesize > sk->sk_forward_alloc) | ||
| 5500 | goto step5; | ||
| 5501 | |||
| 5502 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); | 5506 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); |
| 5503 | 5507 | ||
| 5504 | /* Bulk data transfer: receiver */ | 5508 | /* Bulk data transfer: receiver */ |
| @@ -5546,14 +5550,9 @@ slow_path: | |||
| 5546 | return 0; | 5550 | return 0; |
| 5547 | 5551 | ||
| 5548 | step5: | 5552 | step5: |
| 5549 | if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) | 5553 | if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) |
| 5550 | goto discard; | 5554 | goto discard; |
| 5551 | 5555 | ||
| 5552 | /* ts_recent update must be made after we are sure that the packet | ||
| 5553 | * is in window. | ||
| 5554 | */ | ||
| 5555 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
| 5556 | |||
| 5557 | tcp_rcv_rtt_measure_ts(sk, skb); | 5556 | tcp_rcv_rtt_measure_ts(sk, skb); |
| 5558 | 5557 | ||
| 5559 | /* Process urgent data. */ | 5558 | /* Process urgent data. */ |
| @@ -5989,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5989 | 5988 | ||
| 5990 | /* step 5: check the ACK field */ | 5989 | /* step 5: check the ACK field */ |
| 5991 | if (true) { | 5990 | if (true) { |
| 5992 | int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; | 5991 | int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | |
| 5992 | FLAG_UPDATE_TS_RECENT) > 0; | ||
| 5993 | 5993 | ||
| 5994 | switch (sk->sk_state) { | 5994 | switch (sk->sk_state) { |
| 5995 | case TCP_SYN_RECV: | 5995 | case TCP_SYN_RECV: |
| @@ -6140,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 6140 | } | 6140 | } |
| 6141 | } | 6141 | } |
| 6142 | 6142 | ||
| 6143 | /* ts_recent update must be made after we are sure that the packet | ||
| 6144 | * is in window. | ||
| 6145 | */ | ||
| 6146 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
| 6147 | |||
| 6148 | /* step 6: check the URG bit */ | 6143 | /* step 6: check the URG bit */ |
| 6149 | tcp_urg(sk, skb, th); | 6144 | tcp_urg(sk, skb, th); |
| 6150 | 6145 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..d09203c63264 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk) | |||
| 274 | struct inet_sock *inet = inet_sk(sk); | 274 | struct inet_sock *inet = inet_sk(sk); |
| 275 | u32 mtu = tcp_sk(sk)->mtu_info; | 275 | u32 mtu = tcp_sk(sk)->mtu_info; |
| 276 | 276 | ||
| 277 | /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs | ||
| 278 | * send out by Linux are always <576bytes so they should go through | ||
| 279 | * unfragmented). | ||
| 280 | */ | ||
| 281 | if (sk->sk_state == TCP_LISTEN) | ||
| 282 | return; | ||
| 283 | |||
| 284 | dst = inet_csk_update_pmtu(sk, mtu); | 277 | dst = inet_csk_update_pmtu(sk, mtu); |
| 285 | if (!dst) | 278 | if (!dst) |
| 286 | return; | 279 | return; |
| @@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
| 408 | goto out; | 401 | goto out; |
| 409 | 402 | ||
| 410 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ | 403 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ |
| 404 | /* We are not interested in TCP_LISTEN and open_requests | ||
| 405 | * (SYN-ACKs send out by Linux are always <576bytes so | ||
| 406 | * they should go through unfragmented). | ||
| 407 | */ | ||
| 408 | if (sk->sk_state == TCP_LISTEN) | ||
| 409 | goto out; | ||
| 410 | |||
| 411 | tp->mtu_info = info; | 411 | tp->mtu_info = info; |
| 412 | if (!sock_owned_by_user(sk)) { | 412 | if (!sock_owned_by_user(sk)) { |
| 413 | tcp_v4_mtu_reduced(sk); | 413 | tcp_v4_mtu_reduced(sk); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074da..509912a5ff98 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
| 1298 | eat = min_t(int, len, skb_headlen(skb)); | 1298 | eat = min_t(int, len, skb_headlen(skb)); |
| 1299 | if (eat) { | 1299 | if (eat) { |
| 1300 | __skb_pull(skb, eat); | 1300 | __skb_pull(skb, eat); |
| 1301 | skb->avail_size -= eat; | ||
| 1302 | len -= eat; | 1301 | len -= eat; |
| 1303 | if (!len) | 1302 | if (!len) |
| 1304 | return; | 1303 | return; |
| @@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) | |||
| 1810 | goto send_now; | 1809 | goto send_now; |
| 1811 | } | 1810 | } |
| 1812 | 1811 | ||
| 1813 | /* Ok, it looks like it is advisable to defer. */ | 1812 | /* Ok, it looks like it is advisable to defer. |
| 1814 | tp->tso_deferred = 1 | (jiffies << 1); | 1813 | * Do not rearm the timer if already set to not break TCP ACK clocking. |
| 1814 | */ | ||
| 1815 | if (!tp->tso_deferred) | ||
| 1816 | tp->tso_deferred = 1 | (jiffies << 1); | ||
| 1815 | 1817 | ||
| 1816 | return true; | 1818 | return true; |
| 1817 | 1819 | ||
| @@ -2386,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
| 2386 | */ | 2388 | */ |
| 2387 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2389 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
| 2388 | 2390 | ||
| 2389 | /* make sure skb->data is aligned on arches that require it */ | 2391 | /* make sure skb->data is aligned on arches that require it |
| 2390 | if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { | 2392 | * and check if ack-trimming & collapsing extended the headroom |
| 2393 | * beyond what csum_start can cover. | ||
| 2394 | */ | ||
| 2395 | if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || | ||
| 2396 | skb_headroom(skb) >= 0xFFFF)) { | ||
| 2391 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, | 2397 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, |
| 2392 | GFP_ATOMIC); | 2398 | GFP_ATOMIC); |
| 2393 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : | 2399 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
| @@ -2707,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
| 2707 | skb_reserve(skb, MAX_TCP_HEADER); | 2713 | skb_reserve(skb, MAX_TCP_HEADER); |
| 2708 | 2714 | ||
| 2709 | skb_dst_set(skb, dst); | 2715 | skb_dst_set(skb, dst); |
| 2716 | security_skb_owned_by(skb, sk); | ||
| 2710 | 2717 | ||
| 2711 | mss = dst_metric_advmss(dst); | 2718 | mss = dst_metric_advmss(dst); |
| 2712 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | 2719 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963c..0a073a263720 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb) | |||
| 1762 | 1762 | ||
| 1763 | void udp_destroy_sock(struct sock *sk) | 1763 | void udp_destroy_sock(struct sock *sk) |
| 1764 | { | 1764 | { |
| 1765 | struct udp_sock *up = udp_sk(sk); | ||
| 1765 | bool slow = lock_sock_fast(sk); | 1766 | bool slow = lock_sock_fast(sk); |
| 1766 | udp_flush_pending_frames(sk); | 1767 | udp_flush_pending_frames(sk); |
| 1767 | unlock_sock_fast(sk, slow); | 1768 | unlock_sock_fast(sk, slow); |
| 1769 | if (static_key_false(&udp_encap_needed) && up->encap_type) { | ||
| 1770 | void (*encap_destroy)(struct sock *sk); | ||
| 1771 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | ||
| 1772 | if (encap_destroy) | ||
| 1773 | encap_destroy(sk); | ||
| 1774 | } | ||
| 1768 | } | 1775 | } |
| 1769 | 1776 | ||
| 1770 | /* | 1777 | /* |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f2c7e615f902..dae802c0af7c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, | |||
| 168 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 168 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
| 169 | struct net_device *dev); | 169 | struct net_device *dev); |
| 170 | 170 | ||
| 171 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | ||
| 172 | |||
| 173 | static struct ipv6_devconf ipv6_devconf __read_mostly = { | 171 | static struct ipv6_devconf ipv6_devconf __read_mostly = { |
| 174 | .forwarding = 0, | 172 | .forwarding = 0, |
| 175 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, | 173 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, |
| @@ -837,7 +835,7 @@ out2: | |||
| 837 | rcu_read_unlock_bh(); | 835 | rcu_read_unlock_bh(); |
| 838 | 836 | ||
| 839 | if (likely(err == 0)) | 837 | if (likely(err == 0)) |
| 840 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); | 838 | inet6addr_notifier_call_chain(NETDEV_UP, ifa); |
| 841 | else { | 839 | else { |
| 842 | kfree(ifa); | 840 | kfree(ifa); |
| 843 | ifa = ERR_PTR(err); | 841 | ifa = ERR_PTR(err); |
| @@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
| 927 | 925 | ||
| 928 | ipv6_ifa_notify(RTM_DELADDR, ifp); | 926 | ipv6_ifa_notify(RTM_DELADDR, ifp); |
| 929 | 927 | ||
| 930 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); | 928 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifp); |
| 931 | 929 | ||
| 932 | /* | 930 | /* |
| 933 | * Purge or update corresponding prefix | 931 | * Purge or update corresponding prefix |
| @@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
| 2529 | static void init_loopback(struct net_device *dev) | 2527 | static void init_loopback(struct net_device *dev) |
| 2530 | { | 2528 | { |
| 2531 | struct inet6_dev *idev; | 2529 | struct inet6_dev *idev; |
| 2530 | struct net_device *sp_dev; | ||
| 2531 | struct inet6_ifaddr *sp_ifa; | ||
| 2532 | struct rt6_info *sp_rt; | ||
| 2532 | 2533 | ||
| 2533 | /* ::1 */ | 2534 | /* ::1 */ |
| 2534 | 2535 | ||
| @@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev) | |||
| 2540 | } | 2541 | } |
| 2541 | 2542 | ||
| 2542 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); | 2543 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); |
| 2544 | |||
| 2545 | /* Add routes to other interface's IPv6 addresses */ | ||
| 2546 | for_each_netdev(dev_net(dev), sp_dev) { | ||
| 2547 | if (!strcmp(sp_dev->name, dev->name)) | ||
| 2548 | continue; | ||
| 2549 | |||
| 2550 | idev = __in6_dev_get(sp_dev); | ||
| 2551 | if (!idev) | ||
| 2552 | continue; | ||
| 2553 | |||
| 2554 | read_lock_bh(&idev->lock); | ||
| 2555 | list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { | ||
| 2556 | |||
| 2557 | if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) | ||
| 2558 | continue; | ||
| 2559 | |||
| 2560 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); | ||
| 2561 | |||
| 2562 | /* Failure cases are ignored */ | ||
| 2563 | if (!IS_ERR(sp_rt)) | ||
| 2564 | ip6_ins_rt(sp_rt); | ||
| 2565 | } | ||
| 2566 | read_unlock_bh(&idev->lock); | ||
| 2567 | } | ||
| 2543 | } | 2568 | } |
| 2544 | 2569 | ||
| 2545 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) | 2570 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) |
| @@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
| 2961 | 2986 | ||
| 2962 | if (state != INET6_IFADDR_STATE_DEAD) { | 2987 | if (state != INET6_IFADDR_STATE_DEAD) { |
| 2963 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2988 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
| 2964 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); | 2989 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); |
| 2965 | } | 2990 | } |
| 2966 | in6_ifa_put(ifa); | 2991 | in6_ifa_put(ifa); |
| 2967 | 2992 | ||
| @@ -4784,26 +4809,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev) | |||
| 4784 | 4809 | ||
| 4785 | static int __net_init addrconf_init_net(struct net *net) | 4810 | static int __net_init addrconf_init_net(struct net *net) |
| 4786 | { | 4811 | { |
| 4787 | int err; | 4812 | int err = -ENOMEM; |
| 4788 | struct ipv6_devconf *all, *dflt; | 4813 | struct ipv6_devconf *all, *dflt; |
| 4789 | 4814 | ||
| 4790 | err = -ENOMEM; | 4815 | all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); |
| 4791 | all = &ipv6_devconf; | 4816 | if (all == NULL) |
| 4792 | dflt = &ipv6_devconf_dflt; | 4817 | goto err_alloc_all; |
| 4793 | 4818 | ||
| 4794 | if (!net_eq(net, &init_net)) { | 4819 | dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); |
| 4795 | all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); | 4820 | if (dflt == NULL) |
| 4796 | if (all == NULL) | 4821 | goto err_alloc_dflt; |
| 4797 | goto err_alloc_all; | ||
| 4798 | 4822 | ||
| 4799 | dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); | 4823 | /* these will be inherited by all namespaces */ |
| 4800 | if (dflt == NULL) | 4824 | dflt->autoconf = ipv6_defaults.autoconf; |
| 4801 | goto err_alloc_dflt; | 4825 | dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; |
| 4802 | } else { | ||
| 4803 | /* these will be inherited by all namespaces */ | ||
| 4804 | dflt->autoconf = ipv6_defaults.autoconf; | ||
| 4805 | dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; | ||
| 4806 | } | ||
| 4807 | 4826 | ||
| 4808 | net->ipv6.devconf_all = all; | 4827 | net->ipv6.devconf_all = all; |
| 4809 | net->ipv6.devconf_dflt = dflt; | 4828 | net->ipv6.devconf_dflt = dflt; |
| @@ -4848,22 +4867,6 @@ static struct pernet_operations addrconf_ops = { | |||
| 4848 | .exit = addrconf_exit_net, | 4867 | .exit = addrconf_exit_net, |
| 4849 | }; | 4868 | }; |
| 4850 | 4869 | ||
| 4851 | /* | ||
| 4852 | * Device notifier | ||
| 4853 | */ | ||
| 4854 | |||
| 4855 | int register_inet6addr_notifier(struct notifier_block *nb) | ||
| 4856 | { | ||
| 4857 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | ||
| 4858 | } | ||
| 4859 | EXPORT_SYMBOL(register_inet6addr_notifier); | ||
| 4860 | |||
| 4861 | int unregister_inet6addr_notifier(struct notifier_block *nb) | ||
| 4862 | { | ||
| 4863 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); | ||
| 4864 | } | ||
| 4865 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | ||
| 4866 | |||
| 4867 | static struct rtnl_af_ops inet6_ops = { | 4870 | static struct rtnl_af_ops inet6_ops = { |
| 4868 | .family = AF_INET6, | 4871 | .family = AF_INET6, |
| 4869 | .fill_link_af = inet6_fill_link_af, | 4872 | .fill_link_af = inet6_fill_link_af, |
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d051e5f4bf34..72104562c864 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c | |||
| @@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr) | |||
| 78 | } | 78 | } |
| 79 | EXPORT_SYMBOL(__ipv6_addr_type); | 79 | EXPORT_SYMBOL(__ipv6_addr_type); |
| 80 | 80 | ||
| 81 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | ||
| 82 | |||
| 83 | int register_inet6addr_notifier(struct notifier_block *nb) | ||
| 84 | { | ||
| 85 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | ||
| 86 | } | ||
| 87 | EXPORT_SYMBOL(register_inet6addr_notifier); | ||
| 88 | |||
| 89 | int unregister_inet6addr_notifier(struct notifier_block *nb) | ||
| 90 | { | ||
| 91 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); | ||
| 92 | } | ||
| 93 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | ||
| 94 | |||
| 95 | int inet6addr_notifier_call_chain(unsigned long val, void *v) | ||
| 96 | { | ||
| 97 | return atomic_notifier_call_chain(&inet6addr_chain, val, v); | ||
| 98 | } | ||
| 99 | EXPORT_SYMBOL(inet6addr_notifier_call_chain); | ||
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 5b10414e619e..2bab2aa59745 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
| @@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
| 118 | ipv6_addr_loopback(&hdr->daddr)) | 118 | ipv6_addr_loopback(&hdr->daddr)) |
| 119 | goto err; | 119 | goto err; |
| 120 | 120 | ||
| 121 | /* RFC4291 Errata ID: 3480 | ||
| 122 | * Interface-Local scope spans only a single interface on a | ||
| 123 | * node and is useful only for loopback transmission of | ||
| 124 | * multicast. Packets with interface-local scope received | ||
| 125 | * from another node must be discarded. | ||
| 126 | */ | ||
| 127 | if (!(skb->pkt_type == PACKET_LOOPBACK || | ||
| 128 | dev->flags & IFF_LOOPBACK) && | ||
| 129 | ipv6_addr_is_multicast(&hdr->daddr) && | ||
| 130 | IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) | ||
| 131 | goto err; | ||
| 132 | |||
| 121 | /* RFC4291 2.7 | 133 | /* RFC4291 2.7 |
| 122 | * Nodes must not originate a packet to a multicast address whose scope | 134 | * Nodes must not originate a packet to a multicast address whose scope |
| 123 | * field contains the reserved value 0; if such a packet is received, it | 135 | * field contains the reserved value 0; if such a packet is received, it |
| @@ -241,9 +253,11 @@ resubmit: | |||
| 241 | icmpv6_send(skb, ICMPV6_PARAMPROB, | 253 | icmpv6_send(skb, ICMPV6_PARAMPROB, |
| 242 | ICMPV6_UNK_NEXTHDR, nhoff); | 254 | ICMPV6_UNK_NEXTHDR, nhoff); |
| 243 | } | 255 | } |
| 244 | } else | 256 | kfree_skb(skb); |
| 257 | } else { | ||
| 245 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); | 258 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); |
| 246 | kfree_skb(skb); | 259 | consume_skb(skb); |
| 260 | } | ||
| 247 | } | 261 | } |
| 248 | rcu_read_unlock(); | 262 | rcu_read_unlock(); |
| 249 | return 0; | 263 | return 0; |
| @@ -279,7 +293,8 @@ int ip6_mc_input(struct sk_buff *skb) | |||
| 279 | * IPv6 multicast router mode is now supported ;) | 293 | * IPv6 multicast router mode is now supported ;) |
| 280 | */ | 294 | */ |
| 281 | if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && | 295 | if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && |
| 282 | !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && | 296 | !(ipv6_addr_type(&hdr->daddr) & |
| 297 | (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && | ||
| 283 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { | 298 | likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { |
| 284 | /* | 299 | /* |
| 285 | * Okay, we try to forward - split and duplicate | 300 | * Okay, we try to forward - split and duplicate |
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 83acc1405a18..cb631143721c 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c | |||
| @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, | |||
| 57 | if (pfx_len - i >= 32) | 57 | if (pfx_len - i >= 32) |
| 58 | mask = 0; | 58 | mask = 0; |
| 59 | else | 59 | else |
| 60 | mask = htonl(~((1 << (pfx_len - i)) - 1)); | 60 | mask = htonl((1 << (i - pfx_len + 32)) - 1); |
| 61 | 61 | ||
| 62 | idx = i / 32; | 62 | idx = i / 32; |
| 63 | addr->s6_addr32[idx] &= mask; | 63 | addr->s6_addr32[idx] &= mask; |
| @@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 114 | static struct xt_target ip6t_npt_target_reg[] __read_mostly = { | 114 | static struct xt_target ip6t_npt_target_reg[] __read_mostly = { |
| 115 | { | 115 | { |
| 116 | .name = "SNPT", | 116 | .name = "SNPT", |
| 117 | .table = "mangle", | ||
| 117 | .target = ip6t_snpt_tg, | 118 | .target = ip6t_snpt_tg, |
| 118 | .targetsize = sizeof(struct ip6t_npt_tginfo), | 119 | .targetsize = sizeof(struct ip6t_npt_tginfo), |
| 119 | .checkentry = ip6t_npt_checkentry, | 120 | .checkentry = ip6t_npt_checkentry, |
| @@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { | |||
| 124 | }, | 125 | }, |
| 125 | { | 126 | { |
| 126 | .name = "DNPT", | 127 | .name = "DNPT", |
| 128 | .table = "mangle", | ||
| 127 | .target = ip6t_dnpt_tg, | 129 | .target = ip6t_dnpt_tg, |
| 128 | .targetsize = sizeof(struct ip6t_npt_tginfo), | 130 | .targetsize = sizeof(struct ip6t_npt_tginfo), |
| 129 | .checkentry = ip6t_npt_checkentry, | 131 | .checkentry = ip6t_npt_checkentry, |
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5060d54199ab..e0983f3648a6 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
| @@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, | |||
| 71 | return ret; | 71 | return ret; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static bool rpfilter_is_local(const struct sk_buff *skb) | ||
| 75 | { | ||
| 76 | const struct rt6_info *rt = (const void *) skb_dst(skb); | ||
| 77 | return rt && (rt->rt6i_flags & RTF_LOCAL); | ||
| 78 | } | ||
| 79 | |||
| 74 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 80 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
| 75 | { | 81 | { |
| 76 | const struct xt_rpfilter_info *info = par->matchinfo; | 82 | const struct xt_rpfilter_info *info = par->matchinfo; |
| @@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 78 | struct ipv6hdr *iph; | 84 | struct ipv6hdr *iph; |
| 79 | bool invert = info->flags & XT_RPFILTER_INVERT; | 85 | bool invert = info->flags & XT_RPFILTER_INVERT; |
| 80 | 86 | ||
| 81 | if (par->in->flags & IFF_LOOPBACK) | 87 | if (rpfilter_is_local(skb)) |
| 82 | return true ^ invert; | 88 | return true ^ invert; |
| 83 | 89 | ||
| 84 | iph = ipv6_hdr(skb); | 90 | iph = ipv6_hdr(skb); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 54087e96d7b8..6700069949dd 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #define pr_fmt(fmt) "IPv6-nf: " fmt | ||
| 18 | |||
| 17 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
| 18 | #include <linux/types.h> | 20 | #include <linux/types.h> |
| 19 | #include <linux/string.h> | 21 | #include <linux/string.h> |
| @@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, | |||
| 180 | 182 | ||
| 181 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); | 183 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); |
| 182 | local_bh_enable(); | 184 | local_bh_enable(); |
| 183 | if (q == NULL) | 185 | if (IS_ERR_OR_NULL(q)) { |
| 184 | goto oom; | 186 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
| 185 | 187 | return NULL; | |
| 188 | } | ||
| 186 | return container_of(q, struct frag_queue, q); | 189 | return container_of(q, struct frag_queue, q); |
| 187 | |||
| 188 | oom: | ||
| 189 | return NULL; | ||
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | 192 | ||
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c6a77290c6e..0ba10e53a629 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
| @@ -26,6 +26,9 @@ | |||
| 26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to | 26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to |
| 27 | * calculate ICV correctly. | 27 | * calculate ICV correctly. |
| 28 | */ | 28 | */ |
| 29 | |||
| 30 | #define pr_fmt(fmt) "IPv6: " fmt | ||
| 31 | |||
| 29 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
| 30 | #include <linux/types.h> | 33 | #include <linux/types.h> |
| 31 | #include <linux/string.h> | 34 | #include <linux/string.h> |
| @@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6 | |||
| 185 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); | 188 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); |
| 186 | 189 | ||
| 187 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); | 190 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); |
| 188 | if (q == NULL) | 191 | if (IS_ERR_OR_NULL(q)) { |
| 192 | inet_frag_maybe_warn_overflow(q, pr_fmt()); | ||
| 189 | return NULL; | 193 | return NULL; |
| 190 | 194 | } | |
| 191 | return container_of(q, struct frag_queue, q); | 195 | return container_of(q, struct frag_queue, q); |
| 192 | } | 196 | } |
| 193 | 197 | ||
| @@ -326,9 +330,17 @@ found: | |||
| 326 | } | 330 | } |
| 327 | 331 | ||
| 328 | if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && | 332 | if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
| 329 | fq->q.meat == fq->q.len) | 333 | fq->q.meat == fq->q.len) { |
| 330 | return ip6_frag_reasm(fq, prev, dev); | 334 | int res; |
| 335 | unsigned long orefdst = skb->_skb_refdst; | ||
| 336 | |||
| 337 | skb->_skb_refdst = 0UL; | ||
| 338 | res = ip6_frag_reasm(fq, prev, dev); | ||
| 339 | skb->_skb_refdst = orefdst; | ||
| 340 | return res; | ||
| 341 | } | ||
| 331 | 342 | ||
| 343 | skb_dst_drop(skb); | ||
| 332 | inet_frag_lru_move(&fq->q); | 344 | inet_frag_lru_move(&fq->q); |
| 333 | return -1; | 345 | return -1; |
| 334 | 346 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 928266569689..e5fe0041adfa 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net) | |||
| 1915 | restart: | 1915 | restart: |
| 1916 | read_lock_bh(&table->tb6_lock); | 1916 | read_lock_bh(&table->tb6_lock); |
| 1917 | for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { | 1917 | for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { |
| 1918 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { | 1918 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) && |
| 1919 | (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) { | ||
| 1919 | dst_hold(&rt->dst); | 1920 | dst_hold(&rt->dst); |
| 1920 | read_unlock_bh(&table->tb6_lock); | 1921 | read_unlock_bh(&table->tb6_lock); |
| 1921 | ip6_del_rt(rt); | 1922 | ip6_del_rt(rt); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df5..46a5be85be87 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -386,9 +386,17 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
| 386 | 386 | ||
| 387 | if (dst) | 387 | if (dst) |
| 388 | dst->ops->redirect(dst, sk, skb); | 388 | dst->ops->redirect(dst, sk, skb); |
| 389 | goto out; | ||
| 389 | } | 390 | } |
| 390 | 391 | ||
| 391 | if (type == ICMPV6_PKT_TOOBIG) { | 392 | if (type == ICMPV6_PKT_TOOBIG) { |
| 393 | /* We are not interested in TCP_LISTEN and open_requests | ||
| 394 | * (SYN-ACKs sent out by Linux are always <576 bytes so | ||
| 395 | * they should go through unfragmented). | ||
| 396 | */ | ||
| 397 | if (sk->sk_state == TCP_LISTEN) | ||
| 398 | goto out; | ||
| 399 | |||
| 392 | tp->mtu_info = ntohl(info); | 400 | tp->mtu_info = ntohl(info); |
| 393 | if (!sock_owned_by_user(sk)) | 401 | if (!sock_owned_by_user(sk)) |
| 394 | tcp_v6_mtu_reduced(sk); | 402 | tcp_v6_mtu_reduced(sk); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 599e1ba6d1ce..d8e5e852fc7a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -1285,10 +1285,18 @@ do_confirm: | |||
| 1285 | 1285 | ||
| 1286 | void udpv6_destroy_sock(struct sock *sk) | 1286 | void udpv6_destroy_sock(struct sock *sk) |
| 1287 | { | 1287 | { |
| 1288 | struct udp_sock *up = udp_sk(sk); | ||
| 1288 | lock_sock(sk); | 1289 | lock_sock(sk); |
| 1289 | udp_v6_flush_pending_frames(sk); | 1290 | udp_v6_flush_pending_frames(sk); |
| 1290 | release_sock(sk); | 1291 | release_sock(sk); |
| 1291 | 1292 | ||
| 1293 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { | ||
| 1294 | void (*encap_destroy)(struct sock *sk); | ||
| 1295 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | ||
| 1296 | if (encap_destroy) | ||
| 1297 | encap_destroy(sk); | ||
| 1298 | } | ||
| 1299 | |||
| 1292 | inet6_destroy_sock(sk); | 1300 | inet6_destroy_sock(sk); |
| 1293 | } | 1301 | } |
| 1294 | 1302 | ||
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d07e3a626446..e493b3397ae3 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
| 1386 | 1386 | ||
| 1387 | IRDA_DEBUG(4, "%s()\n", __func__); | 1387 | IRDA_DEBUG(4, "%s()\n", __func__); |
| 1388 | 1388 | ||
| 1389 | msg->msg_namelen = 0; | ||
| 1390 | |||
| 1389 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1391 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
| 1390 | flags & MSG_DONTWAIT, &err); | 1392 | flags & MSG_DONTWAIT, &err); |
| 1391 | if (!skb) | 1393 | if (!skb) |
| @@ -2583,8 +2585,10 @@ bed: | |||
| 2583 | NULL, NULL, NULL); | 2585 | NULL, NULL, NULL); |
| 2584 | 2586 | ||
| 2585 | /* Check if we got some results */ | 2587 | /* Check if we got some results */ |
| 2586 | if (!self->cachedaddr) | 2588 | if (!self->cachedaddr) { |
| 2587 | return -EAGAIN; /* Didn't find any devices */ | 2589 | err = -EAGAIN; /* Didn't find any devices */ |
| 2590 | goto out; | ||
| 2591 | } | ||
| 2588 | daddr = self->cachedaddr; | 2592 | daddr = self->cachedaddr; |
| 2589 | /* Cleanup */ | 2593 | /* Cleanup */ |
| 2590 | self->cachedaddr = 0; | 2594 | self->cachedaddr = 0; |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e530..362ba47968e4 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
| @@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
| 280 | struct tty_port *port = &self->port; | 280 | struct tty_port *port = &self->port; |
| 281 | DECLARE_WAITQUEUE(wait, current); | 281 | DECLARE_WAITQUEUE(wait, current); |
| 282 | int retval; | 282 | int retval; |
| 283 | int do_clocal = 0, extra_count = 0; | 283 | int do_clocal = 0; |
| 284 | unsigned long flags; | 284 | unsigned long flags; |
| 285 | 285 | ||
| 286 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 286 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
| @@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
| 289 | * If non-blocking mode is set, or the port is not enabled, | 289 | * If non-blocking mode is set, or the port is not enabled, |
| 290 | * then make the check up front and then exit. | 290 | * then make the check up front and then exit. |
| 291 | */ | 291 | */ |
| 292 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 292 | if (test_bit(TTY_IO_ERROR, &tty->flags)) { |
| 293 | /* nonblock mode is set or port is not enabled */ | 293 | port->flags |= ASYNC_NORMAL_ACTIVE; |
| 294 | return 0; | ||
| 295 | } | ||
| 296 | |||
| 297 | if (filp->f_flags & O_NONBLOCK) { | ||
| 298 | /* nonblock mode is set */ | ||
| 299 | if (tty->termios.c_cflag & CBAUD) | ||
| 300 | tty_port_raise_dtr_rts(port); | ||
| 294 | port->flags |= ASYNC_NORMAL_ACTIVE; | 301 | port->flags |= ASYNC_NORMAL_ACTIVE; |
| 295 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); | 302 | IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); |
| 296 | return 0; | 303 | return 0; |
| @@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
| 315 | __FILE__, __LINE__, tty->driver->name, port->count); | 322 | __FILE__, __LINE__, tty->driver->name, port->count); |
| 316 | 323 | ||
| 317 | spin_lock_irqsave(&port->lock, flags); | 324 | spin_lock_irqsave(&port->lock, flags); |
| 318 | if (!tty_hung_up_p(filp)) { | 325 | if (!tty_hung_up_p(filp)) |
| 319 | extra_count = 1; | ||
| 320 | port->count--; | 326 | port->count--; |
| 321 | } | ||
| 322 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 323 | port->blocked_open++; | 327 | port->blocked_open++; |
| 328 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 324 | 329 | ||
| 325 | while (1) { | 330 | while (1) { |
| 326 | if (tty->termios.c_cflag & CBAUD) | 331 | if (tty->termios.c_cflag & CBAUD) |
| 327 | tty_port_raise_dtr_rts(port); | 332 | tty_port_raise_dtr_rts(port); |
| 328 | 333 | ||
| 329 | current->state = TASK_INTERRUPTIBLE; | 334 | set_current_state(TASK_INTERRUPTIBLE); |
| 330 | 335 | ||
| 331 | if (tty_hung_up_p(filp) || | 336 | if (tty_hung_up_p(filp) || |
| 332 | !test_bit(ASYNCB_INITIALIZED, &port->flags)) { | 337 | !test_bit(ASYNCB_INITIALIZED, &port->flags)) { |
| @@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, | |||
| 361 | __set_current_state(TASK_RUNNING); | 366 | __set_current_state(TASK_RUNNING); |
| 362 | remove_wait_queue(&port->open_wait, &wait); | 367 | remove_wait_queue(&port->open_wait, &wait); |
| 363 | 368 | ||
| 364 | if (extra_count) { | 369 | spin_lock_irqsave(&port->lock, flags); |
| 365 | /* ++ is not atomic, so this should be protected - Jean II */ | 370 | if (!tty_hung_up_p(filp)) |
| 366 | spin_lock_irqsave(&port->lock, flags); | ||
| 367 | port->count++; | 371 | port->count++; |
| 368 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 369 | } | ||
| 370 | port->blocked_open--; | 372 | port->blocked_open--; |
| 373 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 371 | 374 | ||
| 372 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", | 375 | IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", |
| 373 | __FILE__, __LINE__, tty->driver->name, port->count); | 376 | __FILE__, __LINE__, tty->driver->name, port->count); |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index e71e85ba2bf1..e1b37f5a2691 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
| @@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap, | |||
| 303 | { | 303 | { |
| 304 | struct iriap_cb *self; | 304 | struct iriap_cb *self; |
| 305 | 305 | ||
| 306 | IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); | 306 | IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, |
| 307 | irlmp_reason_str(reason), reason); | ||
| 307 | 308 | ||
| 308 | self = instance; | 309 | self = instance; |
| 309 | 310 | ||
| @@ -495,8 +496,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
| 495 | /* case CS_ISO_8859_9: */ | 496 | /* case CS_ISO_8859_9: */ |
| 496 | /* case CS_UNICODE: */ | 497 | /* case CS_UNICODE: */ |
| 497 | default: | 498 | default: |
| 498 | IRDA_DEBUG(0, "%s(), charset %s, not supported\n", | 499 | IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n", |
| 499 | __func__, ias_charset_types[charset]); | 500 | __func__, charset, |
| 501 | charset < ARRAY_SIZE(ias_charset_types) ? | ||
| 502 | ias_charset_types[charset] : | ||
| 503 | "(unknown)"); | ||
| 500 | 504 | ||
| 501 | /* Aborting, close connection! */ | 505 | /* Aborting, close connection! */ |
| 502 | iriap_disconnect_request(self); | 506 | iriap_disconnect_request(self); |
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 6115a44c0a24..1064621da6f6 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c | |||
| @@ -66,8 +66,15 @@ const char *irlmp_reasons[] = { | |||
| 66 | "LM_LAP_RESET", | 66 | "LM_LAP_RESET", |
| 67 | "LM_INIT_DISCONNECT", | 67 | "LM_INIT_DISCONNECT", |
| 68 | "ERROR, NOT USED", | 68 | "ERROR, NOT USED", |
| 69 | "UNKNOWN", | ||
| 69 | }; | 70 | }; |
| 70 | 71 | ||
| 72 | const char *irlmp_reason_str(LM_REASON reason) | ||
| 73 | { | ||
| 74 | reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1); | ||
| 75 | return irlmp_reasons[reason]; | ||
| 76 | } | ||
| 77 | |||
| 71 | /* | 78 | /* |
| 72 | * Function irlmp_init (void) | 79 | * Function irlmp_init (void) |
| 73 | * | 80 | * |
| @@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, | |||
| 747 | { | 754 | { |
| 748 | struct lsap_cb *lsap; | 755 | struct lsap_cb *lsap; |
| 749 | 756 | ||
| 750 | IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); | 757 | IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, |
| 758 | irlmp_reason_str(reason), reason); | ||
| 751 | IRDA_ASSERT(self != NULL, return;); | 759 | IRDA_ASSERT(self != NULL, return;); |
| 752 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 760 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
| 753 | 761 | ||
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe4284..206ce6db2c36 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
| @@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] = | |||
| 49 | 49 | ||
| 50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) | 50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) |
| 51 | 51 | ||
| 52 | /* macros to set/get socket control buffer at correct offset */ | ||
| 53 | #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ | ||
| 54 | #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) | ||
| 55 | #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ | ||
| 56 | #define CB_TRGCLS_LEN (TRGCLS_SIZE) | ||
| 57 | |||
| 58 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ | 52 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ |
| 59 | do { \ | 53 | do { \ |
| 60 | DEFINE_WAIT(__wait); \ | 54 | DEFINE_WAIT(__wait); \ |
| @@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1141 | 1135 | ||
| 1142 | /* increment and save iucv message tag for msg_completion cbk */ | 1136 | /* increment and save iucv message tag for msg_completion cbk */ |
| 1143 | txmsg.tag = iucv->send_tag++; | 1137 | txmsg.tag = iucv->send_tag++; |
| 1144 | memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); | 1138 | IUCV_SKB_CB(skb)->tag = txmsg.tag; |
| 1145 | 1139 | ||
| 1146 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { | 1140 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
| 1147 | atomic_inc(&iucv->msg_sent); | 1141 | atomic_inc(&iucv->msg_sent); |
| @@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) | |||
| 1224 | return -ENOMEM; | 1218 | return -ENOMEM; |
| 1225 | 1219 | ||
| 1226 | /* copy target class to control buffer of new skb */ | 1220 | /* copy target class to control buffer of new skb */ |
| 1227 | memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); | 1221 | IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; |
| 1228 | 1222 | ||
| 1229 | /* copy data fragment */ | 1223 | /* copy data fragment */ |
| 1230 | memcpy(nskb->data, skb->data + copied, size); | 1224 | memcpy(nskb->data, skb->data + copied, size); |
| @@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
| 1256 | 1250 | ||
| 1257 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ | 1251 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ |
| 1258 | /* Note: the first 4 bytes are reserved for msg tag */ | 1252 | /* Note: the first 4 bytes are reserved for msg tag */ |
| 1259 | memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); | 1253 | IUCV_SKB_CB(skb)->class = msg->class; |
| 1260 | 1254 | ||
| 1261 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ | 1255 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ |
| 1262 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { | 1256 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { |
| @@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
| 1292 | } | 1286 | } |
| 1293 | } | 1287 | } |
| 1294 | 1288 | ||
| 1289 | IUCV_SKB_CB(skb)->offset = 0; | ||
| 1295 | if (sock_queue_rcv_skb(sk, skb)) | 1290 | if (sock_queue_rcv_skb(sk, skb)) |
| 1296 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); | 1291 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); |
| 1297 | } | 1292 | } |
| @@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1327 | unsigned int copied, rlen; | 1322 | unsigned int copied, rlen; |
| 1328 | struct sk_buff *skb, *rskb, *cskb; | 1323 | struct sk_buff *skb, *rskb, *cskb; |
| 1329 | int err = 0; | 1324 | int err = 0; |
| 1325 | u32 offset; | ||
| 1326 | |||
| 1327 | msg->msg_namelen = 0; | ||
| 1330 | 1328 | ||
| 1331 | if ((sk->sk_state == IUCV_DISCONN) && | 1329 | if ((sk->sk_state == IUCV_DISCONN) && |
| 1332 | skb_queue_empty(&iucv->backlog_skb_q) && | 1330 | skb_queue_empty(&iucv->backlog_skb_q) && |
| @@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1346 | return err; | 1344 | return err; |
| 1347 | } | 1345 | } |
| 1348 | 1346 | ||
| 1349 | rlen = skb->len; /* real length of skb */ | 1347 | offset = IUCV_SKB_CB(skb)->offset; |
| 1348 | rlen = skb->len - offset; /* real length of skb */ | ||
| 1350 | copied = min_t(unsigned int, rlen, len); | 1349 | copied = min_t(unsigned int, rlen, len); |
| 1351 | if (!rlen) | 1350 | if (!rlen) |
| 1352 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; | 1351 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; |
| 1353 | 1352 | ||
| 1354 | cskb = skb; | 1353 | cskb = skb; |
| 1355 | if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { | 1354 | if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { |
| 1356 | if (!(flags & MSG_PEEK)) | 1355 | if (!(flags & MSG_PEEK)) |
| 1357 | skb_queue_head(&sk->sk_receive_queue, skb); | 1356 | skb_queue_head(&sk->sk_receive_queue, skb); |
| 1358 | return -EFAULT; | 1357 | return -EFAULT; |
| @@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1370 | * get the trgcls from the control buffer of the skb due to | 1369 | * get the trgcls from the control buffer of the skb due to |
| 1371 | * fragmentation of original iucv message. */ | 1370 | * fragmentation of original iucv message. */ |
| 1372 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, | 1371 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, |
| 1373 | CB_TRGCLS_LEN, CB_TRGCLS(skb)); | 1372 | sizeof(IUCV_SKB_CB(skb)->class), |
| 1373 | (void *)&IUCV_SKB_CB(skb)->class); | ||
| 1374 | if (err) { | 1374 | if (err) { |
| 1375 | if (!(flags & MSG_PEEK)) | 1375 | if (!(flags & MSG_PEEK)) |
| 1376 | skb_queue_head(&sk->sk_receive_queue, skb); | 1376 | skb_queue_head(&sk->sk_receive_queue, skb); |
| @@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1382 | 1382 | ||
| 1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ | 1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ |
| 1384 | if (sk->sk_type == SOCK_STREAM) { | 1384 | if (sk->sk_type == SOCK_STREAM) { |
| 1385 | skb_pull(skb, copied); | 1385 | if (copied < rlen) { |
| 1386 | if (skb->len) { | 1386 | IUCV_SKB_CB(skb)->offset = offset + copied; |
| 1387 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
| 1388 | goto done; | 1387 | goto done; |
| 1389 | } | 1388 | } |
| 1390 | } | 1389 | } |
| @@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1403 | spin_lock_bh(&iucv->message_q.lock); | 1402 | spin_lock_bh(&iucv->message_q.lock); |
| 1404 | rskb = skb_dequeue(&iucv->backlog_skb_q); | 1403 | rskb = skb_dequeue(&iucv->backlog_skb_q); |
| 1405 | while (rskb) { | 1404 | while (rskb) { |
| 1405 | IUCV_SKB_CB(rskb)->offset = 0; | ||
| 1406 | if (sock_queue_rcv_skb(sk, rskb)) { | 1406 | if (sock_queue_rcv_skb(sk, rskb)) { |
| 1407 | skb_queue_head(&iucv->backlog_skb_q, | 1407 | skb_queue_head(&iucv->backlog_skb_q, |
| 1408 | rskb); | 1408 | rskb); |
| @@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path, | |||
| 1830 | spin_lock_irqsave(&list->lock, flags); | 1830 | spin_lock_irqsave(&list->lock, flags); |
| 1831 | 1831 | ||
| 1832 | while (list_skb != (struct sk_buff *)list) { | 1832 | while (list_skb != (struct sk_buff *)list) { |
| 1833 | if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { | 1833 | if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { |
| 1834 | this = list_skb; | 1834 | this = list_skb; |
| 1835 | break; | 1835 | break; |
| 1836 | } | 1836 | } |
| @@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | |||
| 2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); | 2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); |
| 2092 | skb_reset_transport_header(skb); | 2092 | skb_reset_transport_header(skb); |
| 2093 | skb_reset_network_header(skb); | 2093 | skb_reset_network_header(skb); |
| 2094 | IUCV_SKB_CB(skb)->offset = 0; | ||
| 2094 | spin_lock(&iucv->message_q.lock); | 2095 | spin_lock(&iucv->message_q.lock); |
| 2095 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | 2096 | if (skb_queue_empty(&iucv->backlog_skb_q)) { |
| 2096 | if (sock_queue_rcv_skb(sk, skb)) { | 2097 | if (sock_queue_rcv_skb(sk, skb)) { |
| @@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
| 2195 | /* fall through and receive zero length data */ | 2196 | /* fall through and receive zero length data */ |
| 2196 | case 0: | 2197 | case 0: |
| 2197 | /* plain data frame */ | 2198 | /* plain data frame */ |
| 2198 | memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, | 2199 | IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; |
| 2199 | CB_TRGCLS_LEN); | ||
| 2200 | err = afiucv_hs_callback_rx(sk, skb); | 2200 | err = afiucv_hs_callback_rx(sk, skb); |
| 2201 | break; | 2201 | break; |
| 2202 | default: | 2202 | default: |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1ea..5b1e5af25713 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
| 2201 | XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); | 2201 | XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); |
| 2202 | xp->priority = pol->sadb_x_policy_priority; | 2202 | xp->priority = pol->sadb_x_policy_priority; |
| 2203 | 2203 | ||
| 2204 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], | 2204 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; |
| 2205 | xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); | 2205 | xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); |
| 2206 | if (!xp->family) { | 2206 | if (!xp->family) { |
| 2207 | err = -EINVAL; | 2207 | err = -EINVAL; |
| @@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
| 2214 | if (xp->selector.sport) | 2214 | if (xp->selector.sport) |
| 2215 | xp->selector.sport_mask = htons(0xffff); | 2215 | xp->selector.sport_mask = htons(0xffff); |
| 2216 | 2216 | ||
| 2217 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], | 2217 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; |
| 2218 | pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); | 2218 | pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); |
| 2219 | xp->selector.prefixlen_d = sa->sadb_address_prefixlen; | 2219 | xp->selector.prefixlen_d = sa->sadb_address_prefixlen; |
| 2220 | 2220 | ||
| @@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
| 2315 | 2315 | ||
| 2316 | memset(&sel, 0, sizeof(sel)); | 2316 | memset(&sel, 0, sizeof(sel)); |
| 2317 | 2317 | ||
| 2318 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], | 2318 | sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; |
| 2319 | sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); | 2319 | sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr); |
| 2320 | sel.prefixlen_s = sa->sadb_address_prefixlen; | 2320 | sel.prefixlen_s = sa->sadb_address_prefixlen; |
| 2321 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); | 2321 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); |
| @@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
| 2323 | if (sel.sport) | 2323 | if (sel.sport) |
| 2324 | sel.sport_mask = htons(0xffff); | 2324 | sel.sport_mask = htons(0xffff); |
| 2325 | 2325 | ||
| 2326 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], | 2326 | sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; |
| 2327 | pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); | 2327 | pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr); |
| 2328 | sel.prefixlen_d = sa->sadb_address_prefixlen; | 2328 | sel.prefixlen_d = sa->sadb_address_prefixlen; |
| 2329 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); | 2329 | sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); |
| @@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c) | |||
| 2693 | hdr->sadb_msg_pid = c->portid; | 2693 | hdr->sadb_msg_pid = c->portid; |
| 2694 | hdr->sadb_msg_version = PF_KEY_V2; | 2694 | hdr->sadb_msg_version = PF_KEY_V2; |
| 2695 | hdr->sadb_msg_errno = (uint8_t) 0; | 2695 | hdr->sadb_msg_errno = (uint8_t) 0; |
| 2696 | hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; | ||
| 2696 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 2697 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
| 2697 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); | 2698 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); |
| 2698 | return 0; | 2699 | return 0; |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d36875f3427e..8aecf5df6656 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -114,7 +114,6 @@ struct l2tp_net { | |||
| 114 | 114 | ||
| 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); |
| 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
| 117 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
| 118 | 117 | ||
| 119 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | 118 | static inline struct l2tp_net *l2tp_pernet(struct net *net) |
| 120 | { | 119 | { |
| @@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | |||
| 192 | } else { | 191 | } else { |
| 193 | /* Socket is owned by kernelspace */ | 192 | /* Socket is owned by kernelspace */ |
| 194 | sk = tunnel->sock; | 193 | sk = tunnel->sock; |
| 194 | sock_hold(sk); | ||
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | out: | 197 | out: |
| @@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk) | |||
| 210 | } | 210 | } |
| 211 | sock_put(sk); | 211 | sock_put(sk); |
| 212 | } | 212 | } |
| 213 | sock_put(sk); | ||
| 213 | } | 214 | } |
| 214 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); | 215 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); |
| 215 | 216 | ||
| @@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk | |||
| 373 | struct sk_buff *skbp; | 374 | struct sk_buff *skbp; |
| 374 | struct sk_buff *tmp; | 375 | struct sk_buff *tmp; |
| 375 | u32 ns = L2TP_SKB_CB(skb)->ns; | 376 | u32 ns = L2TP_SKB_CB(skb)->ns; |
| 376 | struct l2tp_stats *sstats; | ||
| 377 | 377 | ||
| 378 | spin_lock_bh(&session->reorder_q.lock); | 378 | spin_lock_bh(&session->reorder_q.lock); |
| 379 | sstats = &session->stats; | ||
| 380 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { | 379 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { |
| 381 | if (L2TP_SKB_CB(skbp)->ns > ns) { | 380 | if (L2TP_SKB_CB(skbp)->ns > ns) { |
| 382 | __skb_queue_before(&session->reorder_q, skbp, skb); | 381 | __skb_queue_before(&session->reorder_q, skbp, skb); |
| @@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk | |||
| 384 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", | 383 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", |
| 385 | session->name, ns, L2TP_SKB_CB(skbp)->ns, | 384 | session->name, ns, L2TP_SKB_CB(skbp)->ns, |
| 386 | skb_queue_len(&session->reorder_q)); | 385 | skb_queue_len(&session->reorder_q)); |
| 387 | u64_stats_update_begin(&sstats->syncp); | 386 | atomic_long_inc(&session->stats.rx_oos_packets); |
| 388 | sstats->rx_oos_packets++; | ||
| 389 | u64_stats_update_end(&sstats->syncp); | ||
| 390 | goto out; | 387 | goto out; |
| 391 | } | 388 | } |
| 392 | } | 389 | } |
| @@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * | |||
| 403 | { | 400 | { |
| 404 | struct l2tp_tunnel *tunnel = session->tunnel; | 401 | struct l2tp_tunnel *tunnel = session->tunnel; |
| 405 | int length = L2TP_SKB_CB(skb)->length; | 402 | int length = L2TP_SKB_CB(skb)->length; |
| 406 | struct l2tp_stats *tstats, *sstats; | ||
| 407 | 403 | ||
| 408 | /* We're about to requeue the skb, so return resources | 404 | /* We're about to requeue the skb, so return resources |
| 409 | * to its current owner (a socket receive buffer). | 405 | * to its current owner (a socket receive buffer). |
| 410 | */ | 406 | */ |
| 411 | skb_orphan(skb); | 407 | skb_orphan(skb); |
| 412 | 408 | ||
| 413 | tstats = &tunnel->stats; | 409 | atomic_long_inc(&tunnel->stats.rx_packets); |
| 414 | u64_stats_update_begin(&tstats->syncp); | 410 | atomic_long_add(length, &tunnel->stats.rx_bytes); |
| 415 | sstats = &session->stats; | 411 | atomic_long_inc(&session->stats.rx_packets); |
| 416 | u64_stats_update_begin(&sstats->syncp); | 412 | atomic_long_add(length, &session->stats.rx_bytes); |
| 417 | tstats->rx_packets++; | ||
| 418 | tstats->rx_bytes += length; | ||
| 419 | sstats->rx_packets++; | ||
| 420 | sstats->rx_bytes += length; | ||
| 421 | u64_stats_update_end(&tstats->syncp); | ||
| 422 | u64_stats_update_end(&sstats->syncp); | ||
| 423 | 413 | ||
| 424 | if (L2TP_SKB_CB(skb)->has_seq) { | 414 | if (L2TP_SKB_CB(skb)->has_seq) { |
| 425 | /* Bump our Nr */ | 415 | /* Bump our Nr */ |
| @@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) | |||
| 450 | { | 440 | { |
| 451 | struct sk_buff *skb; | 441 | struct sk_buff *skb; |
| 452 | struct sk_buff *tmp; | 442 | struct sk_buff *tmp; |
| 453 | struct l2tp_stats *sstats; | ||
| 454 | 443 | ||
| 455 | /* If the pkt at the head of the queue has the nr that we | 444 | /* If the pkt at the head of the queue has the nr that we |
| 456 | * expect to send up next, dequeue it and any other | 445 | * expect to send up next, dequeue it and any other |
| @@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) | |||
| 458 | */ | 447 | */ |
| 459 | start: | 448 | start: |
| 460 | spin_lock_bh(&session->reorder_q.lock); | 449 | spin_lock_bh(&session->reorder_q.lock); |
| 461 | sstats = &session->stats; | ||
| 462 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { | 450 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { |
| 463 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { | 451 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { |
| 464 | u64_stats_update_begin(&sstats->syncp); | 452 | atomic_long_inc(&session->stats.rx_seq_discards); |
| 465 | sstats->rx_seq_discards++; | 453 | atomic_long_inc(&session->stats.rx_errors); |
| 466 | sstats->rx_errors++; | ||
| 467 | u64_stats_update_end(&sstats->syncp); | ||
| 468 | l2tp_dbg(session, L2TP_MSG_SEQ, | 454 | l2tp_dbg(session, L2TP_MSG_SEQ, |
| 469 | "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", | 455 | "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", |
| 470 | session->name, L2TP_SKB_CB(skb)->ns, | 456 | session->name, L2TP_SKB_CB(skb)->ns, |
| @@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 623 | struct l2tp_tunnel *tunnel = session->tunnel; | 609 | struct l2tp_tunnel *tunnel = session->tunnel; |
| 624 | int offset; | 610 | int offset; |
| 625 | u32 ns, nr; | 611 | u32 ns, nr; |
| 626 | struct l2tp_stats *sstats = &session->stats; | ||
| 627 | 612 | ||
| 628 | /* The ref count is increased since we now hold a pointer to | 613 | /* The ref count is increased since we now hold a pointer to |
| 629 | * the session. Take care to decrement the refcnt when exiting | 614 | * the session. Take care to decrement the refcnt when exiting |
| @@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 640 | "%s: cookie mismatch (%u/%u). Discarding.\n", | 625 | "%s: cookie mismatch (%u/%u). Discarding.\n", |
| 641 | tunnel->name, tunnel->tunnel_id, | 626 | tunnel->name, tunnel->tunnel_id, |
| 642 | session->session_id); | 627 | session->session_id); |
| 643 | u64_stats_update_begin(&sstats->syncp); | 628 | atomic_long_inc(&session->stats.rx_cookie_discards); |
| 644 | sstats->rx_cookie_discards++; | ||
| 645 | u64_stats_update_end(&sstats->syncp); | ||
| 646 | goto discard; | 629 | goto discard; |
| 647 | } | 630 | } |
| 648 | ptr += session->peer_cookie_len; | 631 | ptr += session->peer_cookie_len; |
| @@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 711 | l2tp_warn(session, L2TP_MSG_SEQ, | 694 | l2tp_warn(session, L2TP_MSG_SEQ, |
| 712 | "%s: recv data has no seq numbers when required. Discarding.\n", | 695 | "%s: recv data has no seq numbers when required. Discarding.\n", |
| 713 | session->name); | 696 | session->name); |
| 714 | u64_stats_update_begin(&sstats->syncp); | 697 | atomic_long_inc(&session->stats.rx_seq_discards); |
| 715 | sstats->rx_seq_discards++; | ||
| 716 | u64_stats_update_end(&sstats->syncp); | ||
| 717 | goto discard; | 698 | goto discard; |
| 718 | } | 699 | } |
| 719 | 700 | ||
| @@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 732 | l2tp_warn(session, L2TP_MSG_SEQ, | 713 | l2tp_warn(session, L2TP_MSG_SEQ, |
| 733 | "%s: recv data has no seq numbers when required. Discarding.\n", | 714 | "%s: recv data has no seq numbers when required. Discarding.\n", |
| 734 | session->name); | 715 | session->name); |
| 735 | u64_stats_update_begin(&sstats->syncp); | 716 | atomic_long_inc(&session->stats.rx_seq_discards); |
| 736 | sstats->rx_seq_discards++; | ||
| 737 | u64_stats_update_end(&sstats->syncp); | ||
| 738 | goto discard; | 717 | goto discard; |
| 739 | } | 718 | } |
| 740 | } | 719 | } |
| @@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 788 | * packets | 767 | * packets |
| 789 | */ | 768 | */ |
| 790 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | 769 | if (L2TP_SKB_CB(skb)->ns != session->nr) { |
| 791 | u64_stats_update_begin(&sstats->syncp); | 770 | atomic_long_inc(&session->stats.rx_seq_discards); |
| 792 | sstats->rx_seq_discards++; | ||
| 793 | u64_stats_update_end(&sstats->syncp); | ||
| 794 | l2tp_dbg(session, L2TP_MSG_SEQ, | 771 | l2tp_dbg(session, L2TP_MSG_SEQ, |
| 795 | "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", | 772 | "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", |
| 796 | session->name, L2TP_SKB_CB(skb)->ns, | 773 | session->name, L2TP_SKB_CB(skb)->ns, |
| @@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 816 | return; | 793 | return; |
| 817 | 794 | ||
| 818 | discard: | 795 | discard: |
| 819 | u64_stats_update_begin(&sstats->syncp); | 796 | atomic_long_inc(&session->stats.rx_errors); |
| 820 | sstats->rx_errors++; | ||
| 821 | u64_stats_update_end(&sstats->syncp); | ||
| 822 | kfree_skb(skb); | 797 | kfree_skb(skb); |
| 823 | 798 | ||
| 824 | if (session->deref) | 799 | if (session->deref) |
| @@ -828,6 +803,23 @@ discard: | |||
| 828 | } | 803 | } |
| 829 | EXPORT_SYMBOL(l2tp_recv_common); | 804 | EXPORT_SYMBOL(l2tp_recv_common); |
| 830 | 805 | ||
| 806 | /* Drop skbs from the session's reorder_q | ||
| 807 | */ | ||
| 808 | int l2tp_session_queue_purge(struct l2tp_session *session) | ||
| 809 | { | ||
| 810 | struct sk_buff *skb = NULL; | ||
| 811 | BUG_ON(!session); | ||
| 812 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
| 813 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
| 814 | atomic_long_inc(&session->stats.rx_errors); | ||
| 815 | kfree_skb(skb); | ||
| 816 | if (session->deref) | ||
| 817 | (*session->deref)(session); | ||
| 818 | } | ||
| 819 | return 0; | ||
| 820 | } | ||
| 821 | EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); | ||
| 822 | |||
| 831 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame | 823 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame |
| 832 | * here. The skb is not on a list when we get here. | 824 | * here. The skb is not on a list when we get here. |
| 833 | * Returns 0 if the packet was a data packet and was successfully passed on. | 825 | * Returns 0 if the packet was a data packet and was successfully passed on. |
| @@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | |||
| 843 | u32 tunnel_id, session_id; | 835 | u32 tunnel_id, session_id; |
| 844 | u16 version; | 836 | u16 version; |
| 845 | int length; | 837 | int length; |
| 846 | struct l2tp_stats *tstats; | ||
| 847 | 838 | ||
| 848 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) | 839 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) |
| 849 | goto discard_bad_csum; | 840 | goto discard_bad_csum; |
| @@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | |||
| 932 | discard_bad_csum: | 923 | discard_bad_csum: |
| 933 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); | 924 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); |
| 934 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); | 925 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); |
| 935 | tstats = &tunnel->stats; | 926 | atomic_long_inc(&tunnel->stats.rx_errors); |
| 936 | u64_stats_update_begin(&tstats->syncp); | ||
| 937 | tstats->rx_errors++; | ||
| 938 | u64_stats_update_end(&tstats->syncp); | ||
| 939 | kfree_skb(skb); | 927 | kfree_skb(skb); |
| 940 | 928 | ||
| 941 | return 0; | 929 | return 0; |
| @@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
| 1062 | struct l2tp_tunnel *tunnel = session->tunnel; | 1050 | struct l2tp_tunnel *tunnel = session->tunnel; |
| 1063 | unsigned int len = skb->len; | 1051 | unsigned int len = skb->len; |
| 1064 | int error; | 1052 | int error; |
| 1065 | struct l2tp_stats *tstats, *sstats; | ||
| 1066 | 1053 | ||
| 1067 | /* Debug */ | 1054 | /* Debug */ |
| 1068 | if (session->send_seq) | 1055 | if (session->send_seq) |
| @@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
| 1091 | error = ip_queue_xmit(skb, fl); | 1078 | error = ip_queue_xmit(skb, fl); |
| 1092 | 1079 | ||
| 1093 | /* Update stats */ | 1080 | /* Update stats */ |
| 1094 | tstats = &tunnel->stats; | ||
| 1095 | u64_stats_update_begin(&tstats->syncp); | ||
| 1096 | sstats = &session->stats; | ||
| 1097 | u64_stats_update_begin(&sstats->syncp); | ||
| 1098 | if (error >= 0) { | 1081 | if (error >= 0) { |
| 1099 | tstats->tx_packets++; | 1082 | atomic_long_inc(&tunnel->stats.tx_packets); |
| 1100 | tstats->tx_bytes += len; | 1083 | atomic_long_add(len, &tunnel->stats.tx_bytes); |
| 1101 | sstats->tx_packets++; | 1084 | atomic_long_inc(&session->stats.tx_packets); |
| 1102 | sstats->tx_bytes += len; | 1085 | atomic_long_add(len, &session->stats.tx_bytes); |
| 1103 | } else { | 1086 | } else { |
| 1104 | tstats->tx_errors++; | 1087 | atomic_long_inc(&tunnel->stats.tx_errors); |
| 1105 | sstats->tx_errors++; | 1088 | atomic_long_inc(&session->stats.tx_errors); |
| 1106 | } | 1089 | } |
| 1107 | u64_stats_update_end(&tstats->syncp); | ||
| 1108 | u64_stats_update_end(&sstats->syncp); | ||
| 1109 | 1090 | ||
| 1110 | return 0; | 1091 | return 0; |
| 1111 | } | 1092 | } |
| @@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk) | |||
| 1282 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ | 1263 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ |
| 1283 | (udp_sk(sk))->encap_type = 0; | 1264 | (udp_sk(sk))->encap_type = 0; |
| 1284 | (udp_sk(sk))->encap_rcv = NULL; | 1265 | (udp_sk(sk))->encap_rcv = NULL; |
| 1266 | (udp_sk(sk))->encap_destroy = NULL; | ||
| 1285 | break; | 1267 | break; |
| 1286 | case L2TP_ENCAPTYPE_IP: | 1268 | case L2TP_ENCAPTYPE_IP: |
| 1287 | break; | 1269 | break; |
| @@ -1311,7 +1293,7 @@ end: | |||
| 1311 | 1293 | ||
| 1312 | /* When the tunnel is closed, all the attached sessions need to go too. | 1294 | /* When the tunnel is closed, all the attached sessions need to go too. |
| 1313 | */ | 1295 | */ |
| 1314 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) | 1296 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) |
| 1315 | { | 1297 | { |
| 1316 | int hash; | 1298 | int hash; |
| 1317 | struct hlist_node *walk; | 1299 | struct hlist_node *walk; |
| @@ -1334,25 +1316,13 @@ again: | |||
| 1334 | 1316 | ||
| 1335 | hlist_del_init(&session->hlist); | 1317 | hlist_del_init(&session->hlist); |
| 1336 | 1318 | ||
| 1337 | /* Since we should hold the sock lock while | ||
| 1338 | * doing any unbinding, we need to release the | ||
| 1339 | * lock we're holding before taking that lock. | ||
| 1340 | * Hold a reference to the sock so it doesn't | ||
| 1341 | * disappear as we're jumping between locks. | ||
| 1342 | */ | ||
| 1343 | if (session->ref != NULL) | 1319 | if (session->ref != NULL) |
| 1344 | (*session->ref)(session); | 1320 | (*session->ref)(session); |
| 1345 | 1321 | ||
| 1346 | write_unlock_bh(&tunnel->hlist_lock); | 1322 | write_unlock_bh(&tunnel->hlist_lock); |
| 1347 | 1323 | ||
| 1348 | if (tunnel->version != L2TP_HDR_VER_2) { | 1324 | __l2tp_session_unhash(session); |
| 1349 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | 1325 | l2tp_session_queue_purge(session); |
| 1350 | |||
| 1351 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
| 1352 | hlist_del_init_rcu(&session->global_hlist); | ||
| 1353 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
| 1354 | synchronize_rcu(); | ||
| 1355 | } | ||
| 1356 | 1326 | ||
| 1357 | if (session->session_close != NULL) | 1327 | if (session->session_close != NULL) |
| 1358 | (*session->session_close)(session); | 1328 | (*session->session_close)(session); |
| @@ -1360,6 +1330,8 @@ again: | |||
| 1360 | if (session->deref != NULL) | 1330 | if (session->deref != NULL) |
| 1361 | (*session->deref)(session); | 1331 | (*session->deref)(session); |
| 1362 | 1332 | ||
| 1333 | l2tp_session_dec_refcount(session); | ||
| 1334 | |||
| 1363 | write_lock_bh(&tunnel->hlist_lock); | 1335 | write_lock_bh(&tunnel->hlist_lock); |
| 1364 | 1336 | ||
| 1365 | /* Now restart from the beginning of this hash | 1337 | /* Now restart from the beginning of this hash |
| @@ -1372,6 +1344,17 @@ again: | |||
| 1372 | } | 1344 | } |
| 1373 | write_unlock_bh(&tunnel->hlist_lock); | 1345 | write_unlock_bh(&tunnel->hlist_lock); |
| 1374 | } | 1346 | } |
| 1347 | EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | ||
| 1348 | |||
| 1349 | /* Tunnel socket destroy hook for UDP encapsulation */ | ||
| 1350 | static void l2tp_udp_encap_destroy(struct sock *sk) | ||
| 1351 | { | ||
| 1352 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
| 1353 | if (tunnel) { | ||
| 1354 | l2tp_tunnel_closeall(tunnel); | ||
| 1355 | sock_put(sk); | ||
| 1356 | } | ||
| 1357 | } | ||
| 1375 | 1358 | ||
| 1376 | /* Really kill the tunnel. | 1359 | /* Really kill the tunnel. |
| 1377 | * Come here only when all sessions have been cleared from the tunnel. | 1360 | * Come here only when all sessions have been cleared from the tunnel. |
| @@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
| 1397 | return; | 1380 | return; |
| 1398 | 1381 | ||
| 1399 | sock = sk->sk_socket; | 1382 | sock = sk->sk_socket; |
| 1400 | BUG_ON(!sock); | ||
| 1401 | 1383 | ||
| 1402 | /* If the tunnel socket was created directly by the kernel, use the | 1384 | /* If the tunnel socket was created by userspace, then go through the |
| 1403 | * sk_* API to release the socket now. Otherwise go through the | 1385 | * inet layer to shut the socket down, and let userspace close it. |
| 1404 | * inet_* layer to shut the socket down, and let userspace close it. | 1386 | * Otherwise, if we created the socket directly within the kernel, use |
| 1387 | * the sk API to release it here. | ||
| 1405 | * In either case the tunnel resources are freed in the socket | 1388 | * In either case the tunnel resources are freed in the socket |
| 1406 | * destructor when the tunnel socket goes away. | 1389 | * destructor when the tunnel socket goes away. |
| 1407 | */ | 1390 | */ |
| 1408 | if (sock->file == NULL) { | 1391 | if (tunnel->fd >= 0) { |
| 1409 | kernel_sock_shutdown(sock, SHUT_RDWR); | 1392 | if (sock) |
| 1410 | sk_release_kernel(sk); | 1393 | inet_shutdown(sock, 2); |
| 1411 | } else { | 1394 | } else { |
| 1412 | inet_shutdown(sock, 2); | 1395 | if (sock) |
| 1396 | kernel_sock_shutdown(sock, SHUT_RDWR); | ||
| 1397 | sk_release_kernel(sk); | ||
| 1413 | } | 1398 | } |
| 1414 | 1399 | ||
| 1415 | l2tp_tunnel_sock_put(sk); | 1400 | l2tp_tunnel_sock_put(sk); |
| @@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1668 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1653 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
| 1669 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; | 1654 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; |
| 1670 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | 1655 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; |
| 1656 | udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; | ||
| 1671 | #if IS_ENABLED(CONFIG_IPV6) | 1657 | #if IS_ENABLED(CONFIG_IPV6) |
| 1672 | if (sk->sk_family == PF_INET6) | 1658 | if (sk->sk_family == PF_INET6) |
| 1673 | udpv6_encap_enable(); | 1659 | udpv6_encap_enable(); |
| @@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
| 1723 | */ | 1709 | */ |
| 1724 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1710 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
| 1725 | { | 1711 | { |
| 1712 | l2tp_tunnel_closeall(tunnel); | ||
| 1726 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); | 1713 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); |
| 1727 | } | 1714 | } |
| 1728 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1715 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
| @@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | |||
| 1731 | */ | 1718 | */ |
| 1732 | void l2tp_session_free(struct l2tp_session *session) | 1719 | void l2tp_session_free(struct l2tp_session *session) |
| 1733 | { | 1720 | { |
| 1734 | struct l2tp_tunnel *tunnel; | 1721 | struct l2tp_tunnel *tunnel = session->tunnel; |
| 1735 | 1722 | ||
| 1736 | BUG_ON(atomic_read(&session->ref_count) != 0); | 1723 | BUG_ON(atomic_read(&session->ref_count) != 0); |
| 1737 | 1724 | ||
| 1738 | tunnel = session->tunnel; | 1725 | if (tunnel) { |
| 1739 | if (tunnel != NULL) { | ||
| 1740 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | 1726 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); |
| 1727 | if (session->session_id != 0) | ||
| 1728 | atomic_dec(&l2tp_session_count); | ||
| 1729 | sock_put(tunnel->sock); | ||
| 1730 | session->tunnel = NULL; | ||
| 1731 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | kfree(session); | ||
| 1741 | 1735 | ||
| 1742 | /* Delete the session from the hash */ | 1736 | return; |
| 1737 | } | ||
| 1738 | EXPORT_SYMBOL_GPL(l2tp_session_free); | ||
| 1739 | |||
| 1740 | /* Remove an l2tp session from l2tp_core's hash lists. | ||
| 1741 | * Provides a tidyup interface for pseudowire code which can't just route all | ||
| 1742 | * shutdown via. l2tp_session_delete and a pseudowire-specific session_close | ||
| 1743 | * callback. | ||
| 1744 | */ | ||
| 1745 | void __l2tp_session_unhash(struct l2tp_session *session) | ||
| 1746 | { | ||
| 1747 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
| 1748 | |||
| 1749 | /* Remove the session from core hashes */ | ||
| 1750 | if (tunnel) { | ||
| 1751 | /* Remove from the per-tunnel hash */ | ||
| 1743 | write_lock_bh(&tunnel->hlist_lock); | 1752 | write_lock_bh(&tunnel->hlist_lock); |
| 1744 | hlist_del_init(&session->hlist); | 1753 | hlist_del_init(&session->hlist); |
| 1745 | write_unlock_bh(&tunnel->hlist_lock); | 1754 | write_unlock_bh(&tunnel->hlist_lock); |
| 1746 | 1755 | ||
| 1747 | /* Unlink from the global hash if not L2TPv2 */ | 1756 | /* For L2TPv3 we have a per-net hash: remove from there, too */ |
| 1748 | if (tunnel->version != L2TP_HDR_VER_2) { | 1757 | if (tunnel->version != L2TP_HDR_VER_2) { |
| 1749 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | 1758 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); |
| 1750 | |||
| 1751 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | 1759 | spin_lock_bh(&pn->l2tp_session_hlist_lock); |
| 1752 | hlist_del_init_rcu(&session->global_hlist); | 1760 | hlist_del_init_rcu(&session->global_hlist); |
| 1753 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | 1761 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); |
| 1754 | synchronize_rcu(); | 1762 | synchronize_rcu(); |
| 1755 | } | 1763 | } |
| 1756 | |||
| 1757 | if (session->session_id != 0) | ||
| 1758 | atomic_dec(&l2tp_session_count); | ||
| 1759 | |||
| 1760 | sock_put(tunnel->sock); | ||
| 1761 | |||
| 1762 | /* This will delete the tunnel context if this | ||
| 1763 | * is the last session on the tunnel. | ||
| 1764 | */ | ||
| 1765 | session->tunnel = NULL; | ||
| 1766 | l2tp_tunnel_dec_refcount(tunnel); | ||
| 1767 | } | 1764 | } |
| 1768 | |||
| 1769 | kfree(session); | ||
| 1770 | |||
| 1771 | return; | ||
| 1772 | } | 1765 | } |
| 1773 | EXPORT_SYMBOL_GPL(l2tp_session_free); | 1766 | EXPORT_SYMBOL_GPL(__l2tp_session_unhash); |
| 1774 | 1767 | ||
| 1775 | /* This function is used by the netlink SESSION_DELETE command and by | 1768 | /* This function is used by the netlink SESSION_DELETE command and by |
| 1776 | pseudowire modules. | 1769 | pseudowire modules. |
| 1777 | */ | 1770 | */ |
| 1778 | int l2tp_session_delete(struct l2tp_session *session) | 1771 | int l2tp_session_delete(struct l2tp_session *session) |
| 1779 | { | 1772 | { |
| 1773 | if (session->ref) | ||
| 1774 | (*session->ref)(session); | ||
| 1775 | __l2tp_session_unhash(session); | ||
| 1776 | l2tp_session_queue_purge(session); | ||
| 1780 | if (session->session_close != NULL) | 1777 | if (session->session_close != NULL) |
| 1781 | (*session->session_close)(session); | 1778 | (*session->session_close)(session); |
| 1782 | 1779 | if (session->deref) | |
| 1780 | (*session->ref)(session); | ||
| 1783 | l2tp_session_dec_refcount(session); | 1781 | l2tp_session_dec_refcount(session); |
| 1784 | |||
| 1785 | return 0; | 1782 | return 0; |
| 1786 | } | 1783 | } |
| 1787 | EXPORT_SYMBOL_GPL(l2tp_session_delete); | 1784 | EXPORT_SYMBOL_GPL(l2tp_session_delete); |
| 1788 | 1785 | ||
| 1789 | |||
| 1790 | /* We come here whenever a session's send_seq, cookie_len or | 1786 | /* We come here whenever a session's send_seq, cookie_len or |
| 1791 | * l2specific_len parameters are set. | 1787 | * l2specific_len parameters are set. |
| 1792 | */ | 1788 | */ |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8eb8f1d47f3a..485a490fd990 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -36,16 +36,15 @@ enum { | |||
| 36 | struct sk_buff; | 36 | struct sk_buff; |
| 37 | 37 | ||
| 38 | struct l2tp_stats { | 38 | struct l2tp_stats { |
| 39 | u64 tx_packets; | 39 | atomic_long_t tx_packets; |
| 40 | u64 tx_bytes; | 40 | atomic_long_t tx_bytes; |
| 41 | u64 tx_errors; | 41 | atomic_long_t tx_errors; |
| 42 | u64 rx_packets; | 42 | atomic_long_t rx_packets; |
| 43 | u64 rx_bytes; | 43 | atomic_long_t rx_bytes; |
| 44 | u64 rx_seq_discards; | 44 | atomic_long_t rx_seq_discards; |
| 45 | u64 rx_oos_packets; | 45 | atomic_long_t rx_oos_packets; |
| 46 | u64 rx_errors; | 46 | atomic_long_t rx_errors; |
| 47 | u64 rx_cookie_discards; | 47 | atomic_long_t rx_cookie_discards; |
| 48 | struct u64_stats_sync syncp; | ||
| 49 | }; | 48 | }; |
| 50 | 49 | ||
| 51 | struct l2tp_tunnel; | 50 | struct l2tp_tunnel; |
| @@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); | |||
| 240 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); | 239 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); |
| 241 | 240 | ||
| 242 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); | 241 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); |
| 242 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
| 243 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | 243 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); |
| 244 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | 244 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); |
| 245 | extern void __l2tp_session_unhash(struct l2tp_session *session); | ||
| 245 | extern int l2tp_session_delete(struct l2tp_session *session); | 246 | extern int l2tp_session_delete(struct l2tp_session *session); |
| 246 | extern void l2tp_session_free(struct l2tp_session *session); | 247 | extern void l2tp_session_free(struct l2tp_session *session); |
| 247 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | 248 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); |
| 249 | extern int l2tp_session_queue_purge(struct l2tp_session *session); | ||
| 248 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | 250 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); |
| 249 | 251 | ||
| 250 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | 252 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); |
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index c3813bc84552..072d7202e182 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c | |||
| @@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | |||
| 146 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, | 146 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, |
| 147 | atomic_read(&tunnel->ref_count)); | 147 | atomic_read(&tunnel->ref_count)); |
| 148 | 148 | ||
| 149 | seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", | 149 | seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n", |
| 150 | tunnel->debug, | 150 | tunnel->debug, |
| 151 | (unsigned long long)tunnel->stats.tx_packets, | 151 | atomic_long_read(&tunnel->stats.tx_packets), |
| 152 | (unsigned long long)tunnel->stats.tx_bytes, | 152 | atomic_long_read(&tunnel->stats.tx_bytes), |
| 153 | (unsigned long long)tunnel->stats.tx_errors, | 153 | atomic_long_read(&tunnel->stats.tx_errors), |
| 154 | (unsigned long long)tunnel->stats.rx_packets, | 154 | atomic_long_read(&tunnel->stats.rx_packets), |
| 155 | (unsigned long long)tunnel->stats.rx_bytes, | 155 | atomic_long_read(&tunnel->stats.rx_bytes), |
| 156 | (unsigned long long)tunnel->stats.rx_errors); | 156 | atomic_long_read(&tunnel->stats.rx_errors)); |
| 157 | 157 | ||
| 158 | if (tunnel->show != NULL) | 158 | if (tunnel->show != NULL) |
| 159 | tunnel->show(m, tunnel); | 159 | tunnel->show(m, tunnel); |
| @@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) | |||
| 203 | seq_printf(m, "\n"); | 203 | seq_printf(m, "\n"); |
| 204 | } | 204 | } |
| 205 | 205 | ||
| 206 | seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", | 206 | seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n", |
| 207 | session->nr, session->ns, | 207 | session->nr, session->ns, |
| 208 | (unsigned long long)session->stats.tx_packets, | 208 | atomic_long_read(&session->stats.tx_packets), |
| 209 | (unsigned long long)session->stats.tx_bytes, | 209 | atomic_long_read(&session->stats.tx_bytes), |
| 210 | (unsigned long long)session->stats.tx_errors, | 210 | atomic_long_read(&session->stats.tx_errors), |
| 211 | (unsigned long long)session->stats.rx_packets, | 211 | atomic_long_read(&session->stats.rx_packets), |
| 212 | (unsigned long long)session->stats.rx_bytes, | 212 | atomic_long_read(&session->stats.rx_bytes), |
| 213 | (unsigned long long)session->stats.rx_errors); | 213 | atomic_long_read(&session->stats.rx_errors)); |
| 214 | 214 | ||
| 215 | if (session->show != NULL) | 215 | if (session->show != NULL) |
| 216 | session->show(m, session); | 216 | session->show(m, session); |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 7f41b7051269..571db8dd2292 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
| @@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout) | |||
| 228 | static void l2tp_ip_destroy_sock(struct sock *sk) | 228 | static void l2tp_ip_destroy_sock(struct sock *sk) |
| 229 | { | 229 | { |
| 230 | struct sk_buff *skb; | 230 | struct sk_buff *skb; |
| 231 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
| 231 | 232 | ||
| 232 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | 233 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) |
| 233 | kfree_skb(skb); | 234 | kfree_skb(skb); |
| 234 | 235 | ||
| 236 | if (tunnel) { | ||
| 237 | l2tp_tunnel_closeall(tunnel); | ||
| 238 | sock_put(sk); | ||
| 239 | } | ||
| 240 | |||
| 235 | sk_refcnt_debug_dec(sk); | 241 | sk_refcnt_debug_dec(sk); |
| 236 | } | 242 | } |
| 237 | 243 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 41f2f8126ebc..b8a6039314e8 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
| @@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) | |||
| 241 | 241 | ||
| 242 | static void l2tp_ip6_destroy_sock(struct sock *sk) | 242 | static void l2tp_ip6_destroy_sock(struct sock *sk) |
| 243 | { | 243 | { |
| 244 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
| 245 | |||
| 244 | lock_sock(sk); | 246 | lock_sock(sk); |
| 245 | ip6_flush_pending_frames(sk); | 247 | ip6_flush_pending_frames(sk); |
| 246 | release_sock(sk); | 248 | release_sock(sk); |
| 247 | 249 | ||
| 250 | if (tunnel) { | ||
| 251 | l2tp_tunnel_closeall(tunnel); | ||
| 252 | sock_put(sk); | ||
| 253 | } | ||
| 254 | |||
| 248 | inet6_destroy_sock(sk); | 255 | inet6_destroy_sock(sk); |
| 249 | } | 256 | } |
| 250 | 257 | ||
| @@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
| 683 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; | 690 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; |
| 684 | lsa->l2tp_flowinfo = 0; | 691 | lsa->l2tp_flowinfo = 0; |
| 685 | lsa->l2tp_scope_id = 0; | 692 | lsa->l2tp_scope_id = 0; |
| 693 | lsa->l2tp_conn_id = 0; | ||
| 686 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) | 694 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) |
| 687 | lsa->l2tp_scope_id = IP6CB(skb)->iif; | 695 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
| 688 | } | 696 | } |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index c1bab22db85e..0825ff26e113 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
| @@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
| 246 | #if IS_ENABLED(CONFIG_IPV6) | 246 | #if IS_ENABLED(CONFIG_IPV6) |
| 247 | struct ipv6_pinfo *np = NULL; | 247 | struct ipv6_pinfo *np = NULL; |
| 248 | #endif | 248 | #endif |
| 249 | struct l2tp_stats stats; | ||
| 250 | unsigned int start; | ||
| 251 | 249 | ||
| 252 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, | 250 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, |
| 253 | L2TP_CMD_TUNNEL_GET); | 251 | L2TP_CMD_TUNNEL_GET); |
| @@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
| 265 | if (nest == NULL) | 263 | if (nest == NULL) |
| 266 | goto nla_put_failure; | 264 | goto nla_put_failure; |
| 267 | 265 | ||
| 268 | do { | 266 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
| 269 | start = u64_stats_fetch_begin(&tunnel->stats.syncp); | 267 | atomic_long_read(&tunnel->stats.tx_packets)) || |
| 270 | stats.tx_packets = tunnel->stats.tx_packets; | 268 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
| 271 | stats.tx_bytes = tunnel->stats.tx_bytes; | 269 | atomic_long_read(&tunnel->stats.tx_bytes)) || |
| 272 | stats.tx_errors = tunnel->stats.tx_errors; | 270 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
| 273 | stats.rx_packets = tunnel->stats.rx_packets; | 271 | atomic_long_read(&tunnel->stats.tx_errors)) || |
| 274 | stats.rx_bytes = tunnel->stats.rx_bytes; | 272 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
| 275 | stats.rx_errors = tunnel->stats.rx_errors; | 273 | atomic_long_read(&tunnel->stats.rx_packets)) || |
| 276 | stats.rx_seq_discards = tunnel->stats.rx_seq_discards; | 274 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
| 277 | stats.rx_oos_packets = tunnel->stats.rx_oos_packets; | 275 | atomic_long_read(&tunnel->stats.rx_bytes)) || |
| 278 | } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); | ||
| 279 | |||
| 280 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
| 281 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
| 282 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
| 283 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
| 284 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
| 285 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 276 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
| 286 | stats.rx_seq_discards) || | 277 | atomic_long_read(&tunnel->stats.rx_seq_discards)) || |
| 287 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 278 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
| 288 | stats.rx_oos_packets) || | 279 | atomic_long_read(&tunnel->stats.rx_oos_packets)) || |
| 289 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 280 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
| 281 | atomic_long_read(&tunnel->stats.rx_errors))) | ||
| 290 | goto nla_put_failure; | 282 | goto nla_put_failure; |
| 291 | nla_nest_end(skb, nest); | 283 | nla_nest_end(skb, nest); |
| 292 | 284 | ||
| @@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
| 612 | struct nlattr *nest; | 604 | struct nlattr *nest; |
| 613 | struct l2tp_tunnel *tunnel = session->tunnel; | 605 | struct l2tp_tunnel *tunnel = session->tunnel; |
| 614 | struct sock *sk = NULL; | 606 | struct sock *sk = NULL; |
| 615 | struct l2tp_stats stats; | ||
| 616 | unsigned int start; | ||
| 617 | 607 | ||
| 618 | sk = tunnel->sock; | 608 | sk = tunnel->sock; |
| 619 | 609 | ||
| @@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
| 656 | if (nest == NULL) | 646 | if (nest == NULL) |
| 657 | goto nla_put_failure; | 647 | goto nla_put_failure; |
| 658 | 648 | ||
| 659 | do { | 649 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
| 660 | start = u64_stats_fetch_begin(&session->stats.syncp); | 650 | atomic_long_read(&session->stats.tx_packets)) || |
| 661 | stats.tx_packets = session->stats.tx_packets; | 651 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
| 662 | stats.tx_bytes = session->stats.tx_bytes; | 652 | atomic_long_read(&session->stats.tx_bytes)) || |
| 663 | stats.tx_errors = session->stats.tx_errors; | 653 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
| 664 | stats.rx_packets = session->stats.rx_packets; | 654 | atomic_long_read(&session->stats.tx_errors)) || |
| 665 | stats.rx_bytes = session->stats.rx_bytes; | 655 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
| 666 | stats.rx_errors = session->stats.rx_errors; | 656 | atomic_long_read(&session->stats.rx_packets)) || |
| 667 | stats.rx_seq_discards = session->stats.rx_seq_discards; | 657 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
| 668 | stats.rx_oos_packets = session->stats.rx_oos_packets; | 658 | atomic_long_read(&session->stats.rx_bytes)) || |
| 669 | } while (u64_stats_fetch_retry(&session->stats.syncp, start)); | ||
| 670 | |||
| 671 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
| 672 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
| 673 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
| 674 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
| 675 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
| 676 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 659 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
| 677 | stats.rx_seq_discards) || | 660 | atomic_long_read(&session->stats.rx_seq_discards)) || |
| 678 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 661 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
| 679 | stats.rx_oos_packets) || | 662 | atomic_long_read(&session->stats.rx_oos_packets)) || |
| 680 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 663 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
| 664 | atomic_long_read(&session->stats.rx_errors))) | ||
| 681 | goto nla_put_failure; | 665 | goto nla_put_failure; |
| 682 | nla_nest_end(skb, nest); | 666 | nla_nest_end(skb, nest); |
| 683 | 667 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 3f4e3afc191a..637a341c1e2d 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -97,6 +97,7 @@ | |||
| 97 | #include <net/ip.h> | 97 | #include <net/ip.h> |
| 98 | #include <net/udp.h> | 98 | #include <net/udp.h> |
| 99 | #include <net/xfrm.h> | 99 | #include <net/xfrm.h> |
| 100 | #include <net/inet_common.h> | ||
| 100 | 101 | ||
| 101 | #include <asm/byteorder.h> | 102 | #include <asm/byteorder.h> |
| 102 | #include <linux/atomic.h> | 103 | #include <linux/atomic.h> |
| @@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int | |||
| 259 | session->name); | 260 | session->name); |
| 260 | 261 | ||
| 261 | /* Not bound. Nothing we can do, so discard. */ | 262 | /* Not bound. Nothing we can do, so discard. */ |
| 262 | session->stats.rx_errors++; | 263 | atomic_long_inc(&session->stats.rx_errors); |
| 263 | kfree_skb(skb); | 264 | kfree_skb(skb); |
| 264 | } | 265 | } |
| 265 | 266 | ||
| @@ -355,6 +356,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh | |||
| 355 | l2tp_xmit_skb(session, skb, session->hdr_len); | 356 | l2tp_xmit_skb(session, skb, session->hdr_len); |
| 356 | 357 | ||
| 357 | sock_put(ps->tunnel_sock); | 358 | sock_put(ps->tunnel_sock); |
| 359 | sock_put(sk); | ||
| 358 | 360 | ||
| 359 | return error; | 361 | return error; |
| 360 | 362 | ||
| @@ -446,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session) | |||
| 446 | { | 448 | { |
| 447 | struct pppol2tp_session *ps = l2tp_session_priv(session); | 449 | struct pppol2tp_session *ps = l2tp_session_priv(session); |
| 448 | struct sock *sk = ps->sock; | 450 | struct sock *sk = ps->sock; |
| 449 | struct sk_buff *skb; | 451 | struct socket *sock = sk->sk_socket; |
| 450 | 452 | ||
| 451 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 453 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
| 452 | 454 | ||
| 453 | if (session->session_id == 0) | ||
| 454 | goto out; | ||
| 455 | |||
| 456 | if (sk != NULL) { | ||
| 457 | lock_sock(sk); | ||
| 458 | |||
| 459 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
| 460 | pppox_unbind_sock(sk); | ||
| 461 | sk->sk_state = PPPOX_DEAD; | ||
| 462 | sk->sk_state_change(sk); | ||
| 463 | } | ||
| 464 | |||
| 465 | /* Purge any queued data */ | ||
| 466 | skb_queue_purge(&sk->sk_receive_queue); | ||
| 467 | skb_queue_purge(&sk->sk_write_queue); | ||
| 468 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
| 469 | kfree_skb(skb); | ||
| 470 | sock_put(sk); | ||
| 471 | } | ||
| 472 | 455 | ||
| 473 | release_sock(sk); | 456 | if (sock) { |
| 457 | inet_shutdown(sock, 2); | ||
| 458 | /* Don't let the session go away before our socket does */ | ||
| 459 | l2tp_session_inc_refcount(session); | ||
| 474 | } | 460 | } |
| 475 | |||
| 476 | out: | ||
| 477 | return; | 461 | return; |
| 478 | } | 462 | } |
| 479 | 463 | ||
| @@ -482,19 +466,12 @@ out: | |||
| 482 | */ | 466 | */ |
| 483 | static void pppol2tp_session_destruct(struct sock *sk) | 467 | static void pppol2tp_session_destruct(struct sock *sk) |
| 484 | { | 468 | { |
| 485 | struct l2tp_session *session; | 469 | struct l2tp_session *session = sk->sk_user_data; |
| 486 | 470 | if (session) { | |
| 487 | if (sk->sk_user_data != NULL) { | ||
| 488 | session = sk->sk_user_data; | ||
| 489 | if (session == NULL) | ||
| 490 | goto out; | ||
| 491 | |||
| 492 | sk->sk_user_data = NULL; | 471 | sk->sk_user_data = NULL; |
| 493 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 472 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
| 494 | l2tp_session_dec_refcount(session); | 473 | l2tp_session_dec_refcount(session); |
| 495 | } | 474 | } |
| 496 | |||
| 497 | out: | ||
| 498 | return; | 475 | return; |
| 499 | } | 476 | } |
| 500 | 477 | ||
| @@ -524,16 +501,13 @@ static int pppol2tp_release(struct socket *sock) | |||
| 524 | session = pppol2tp_sock_to_session(sk); | 501 | session = pppol2tp_sock_to_session(sk); |
| 525 | 502 | ||
| 526 | /* Purge any queued data */ | 503 | /* Purge any queued data */ |
| 527 | skb_queue_purge(&sk->sk_receive_queue); | ||
| 528 | skb_queue_purge(&sk->sk_write_queue); | ||
| 529 | if (session != NULL) { | 504 | if (session != NULL) { |
| 530 | struct sk_buff *skb; | 505 | __l2tp_session_unhash(session); |
| 531 | while ((skb = skb_dequeue(&session->reorder_q))) { | 506 | l2tp_session_queue_purge(session); |
| 532 | kfree_skb(skb); | ||
| 533 | sock_put(sk); | ||
| 534 | } | ||
| 535 | sock_put(sk); | 507 | sock_put(sk); |
| 536 | } | 508 | } |
| 509 | skb_queue_purge(&sk->sk_receive_queue); | ||
| 510 | skb_queue_purge(&sk->sk_write_queue); | ||
| 537 | 511 | ||
| 538 | release_sock(sk); | 512 | release_sock(sk); |
| 539 | 513 | ||
| @@ -879,18 +853,6 @@ out: | |||
| 879 | return error; | 853 | return error; |
| 880 | } | 854 | } |
| 881 | 855 | ||
| 882 | /* Called when deleting sessions via the netlink interface. | ||
| 883 | */ | ||
| 884 | static int pppol2tp_session_delete(struct l2tp_session *session) | ||
| 885 | { | ||
| 886 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
| 887 | |||
| 888 | if (ps->sock == NULL) | ||
| 889 | l2tp_session_dec_refcount(session); | ||
| 890 | |||
| 891 | return 0; | ||
| 892 | } | ||
| 893 | |||
| 894 | #endif /* CONFIG_L2TP_V3 */ | 856 | #endif /* CONFIG_L2TP_V3 */ |
| 895 | 857 | ||
| 896 | /* getname() support. | 858 | /* getname() support. |
| @@ -1024,14 +986,14 @@ end: | |||
| 1024 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | 986 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, |
| 1025 | struct l2tp_stats *stats) | 987 | struct l2tp_stats *stats) |
| 1026 | { | 988 | { |
| 1027 | dest->tx_packets = stats->tx_packets; | 989 | dest->tx_packets = atomic_long_read(&stats->tx_packets); |
| 1028 | dest->tx_bytes = stats->tx_bytes; | 990 | dest->tx_bytes = atomic_long_read(&stats->tx_bytes); |
| 1029 | dest->tx_errors = stats->tx_errors; | 991 | dest->tx_errors = atomic_long_read(&stats->tx_errors); |
| 1030 | dest->rx_packets = stats->rx_packets; | 992 | dest->rx_packets = atomic_long_read(&stats->rx_packets); |
| 1031 | dest->rx_bytes = stats->rx_bytes; | 993 | dest->rx_bytes = atomic_long_read(&stats->rx_bytes); |
| 1032 | dest->rx_seq_discards = stats->rx_seq_discards; | 994 | dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); |
| 1033 | dest->rx_oos_packets = stats->rx_oos_packets; | 995 | dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); |
| 1034 | dest->rx_errors = stats->rx_errors; | 996 | dest->rx_errors = atomic_long_read(&stats->rx_errors); |
| 1035 | } | 997 | } |
| 1036 | 998 | ||
| 1037 | /* Session ioctl helper. | 999 | /* Session ioctl helper. |
| @@ -1665,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | |||
| 1665 | tunnel->name, | 1627 | tunnel->name, |
| 1666 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', | 1628 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', |
| 1667 | atomic_read(&tunnel->ref_count) - 1); | 1629 | atomic_read(&tunnel->ref_count) - 1); |
| 1668 | seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", | 1630 | seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n", |
| 1669 | tunnel->debug, | 1631 | tunnel->debug, |
| 1670 | (unsigned long long)tunnel->stats.tx_packets, | 1632 | atomic_long_read(&tunnel->stats.tx_packets), |
| 1671 | (unsigned long long)tunnel->stats.tx_bytes, | 1633 | atomic_long_read(&tunnel->stats.tx_bytes), |
| 1672 | (unsigned long long)tunnel->stats.tx_errors, | 1634 | atomic_long_read(&tunnel->stats.tx_errors), |
| 1673 | (unsigned long long)tunnel->stats.rx_packets, | 1635 | atomic_long_read(&tunnel->stats.rx_packets), |
| 1674 | (unsigned long long)tunnel->stats.rx_bytes, | 1636 | atomic_long_read(&tunnel->stats.rx_bytes), |
| 1675 | (unsigned long long)tunnel->stats.rx_errors); | 1637 | atomic_long_read(&tunnel->stats.rx_errors)); |
| 1676 | } | 1638 | } |
| 1677 | 1639 | ||
| 1678 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | 1640 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) |
| @@ -1707,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | |||
| 1707 | session->lns_mode ? "LNS" : "LAC", | 1669 | session->lns_mode ? "LNS" : "LAC", |
| 1708 | session->debug, | 1670 | session->debug, |
| 1709 | jiffies_to_msecs(session->reorder_timeout)); | 1671 | jiffies_to_msecs(session->reorder_timeout)); |
| 1710 | seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", | 1672 | seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n", |
| 1711 | session->nr, session->ns, | 1673 | session->nr, session->ns, |
| 1712 | (unsigned long long)session->stats.tx_packets, | 1674 | atomic_long_read(&session->stats.tx_packets), |
| 1713 | (unsigned long long)session->stats.tx_bytes, | 1675 | atomic_long_read(&session->stats.tx_bytes), |
| 1714 | (unsigned long long)session->stats.tx_errors, | 1676 | atomic_long_read(&session->stats.tx_errors), |
| 1715 | (unsigned long long)session->stats.rx_packets, | 1677 | atomic_long_read(&session->stats.rx_packets), |
| 1716 | (unsigned long long)session->stats.rx_bytes, | 1678 | atomic_long_read(&session->stats.rx_bytes), |
| 1717 | (unsigned long long)session->stats.rx_errors); | 1679 | atomic_long_read(&session->stats.rx_errors)); |
| 1718 | 1680 | ||
| 1719 | if (po) | 1681 | if (po) |
| 1720 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | 1682 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); |
| @@ -1838,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = { | |||
| 1838 | 1800 | ||
| 1839 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { | 1801 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { |
| 1840 | .session_create = pppol2tp_session_create, | 1802 | .session_create = pppol2tp_session_create, |
| 1841 | .session_delete = pppol2tp_session_delete, | 1803 | .session_delete = l2tp_session_delete, |
| 1842 | }; | 1804 | }; |
| 1843 | 1805 | ||
| 1844 | #endif /* CONFIG_L2TP_V3 */ | 1806 | #endif /* CONFIG_L2TP_V3 */ |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c464..48aaa89253e0 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
| @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
| 721 | long timeo; | 721 | long timeo; |
| 722 | 722 | ||
| 723 | msg->msg_namelen = 0; | ||
| 724 | |||
| 723 | lock_sock(sk); | 725 | lock_sock(sk); |
| 724 | copied = -ENOTCONN; | 726 | copied = -ENOTCONN; |
| 725 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) | 727 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 09d96a8f6c2c..a6893602f87a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
| 2582 | list_del(&dep->list); | 2582 | list_del(&dep->list); |
| 2583 | mutex_unlock(&local->mtx); | 2583 | mutex_unlock(&local->mtx); |
| 2584 | 2584 | ||
| 2585 | ieee80211_roc_notify_destroy(dep); | 2585 | ieee80211_roc_notify_destroy(dep, true); |
| 2586 | return 0; | 2586 | return 0; |
| 2587 | } | 2587 | } |
| 2588 | 2588 | ||
| @@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
| 2622 | ieee80211_start_next_roc(local); | 2622 | ieee80211_start_next_roc(local); |
| 2623 | mutex_unlock(&local->mtx); | 2623 | mutex_unlock(&local->mtx); |
| 2624 | 2624 | ||
| 2625 | ieee80211_roc_notify_destroy(found); | 2625 | ieee80211_roc_notify_destroy(found, true); |
| 2626 | } else { | 2626 | } else { |
| 2627 | /* work may be pending so use it all the time */ | 2627 | /* work may be pending so use it all the time */ |
| 2628 | found->abort = true; | 2628 | found->abort = true; |
| @@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
| 2632 | 2632 | ||
| 2633 | /* work will clean up etc */ | 2633 | /* work will clean up etc */ |
| 2634 | flush_delayed_work(&found->work); | 2634 | flush_delayed_work(&found->work); |
| 2635 | WARN_ON(!found->to_be_freed); | ||
| 2636 | kfree(found); | ||
| 2635 | } | 2637 | } |
| 2636 | 2638 | ||
| 2637 | return 0; | 2639 | return 0; |
| @@ -3285,6 +3287,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, | |||
| 3285 | struct cfg80211_chan_def *chandef) | 3287 | struct cfg80211_chan_def *chandef) |
| 3286 | { | 3288 | { |
| 3287 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); | 3289 | struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); |
| 3290 | struct ieee80211_local *local = wiphy_priv(wiphy); | ||
| 3288 | struct ieee80211_chanctx_conf *chanctx_conf; | 3291 | struct ieee80211_chanctx_conf *chanctx_conf; |
| 3289 | int ret = -ENODATA; | 3292 | int ret = -ENODATA; |
| 3290 | 3293 | ||
| @@ -3293,6 +3296,16 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, | |||
| 3293 | if (chanctx_conf) { | 3296 | if (chanctx_conf) { |
| 3294 | *chandef = chanctx_conf->def; | 3297 | *chandef = chanctx_conf->def; |
| 3295 | ret = 0; | 3298 | ret = 0; |
| 3299 | } else if (local->open_count > 0 && | ||
| 3300 | local->open_count == local->monitors && | ||
| 3301 | sdata->vif.type == NL80211_IFTYPE_MONITOR) { | ||
| 3302 | if (local->use_chanctx) | ||
| 3303 | *chandef = local->monitor_chandef; | ||
| 3304 | else | ||
| 3305 | cfg80211_chandef_create(chandef, | ||
| 3306 | local->_oper_channel, | ||
| 3307 | local->_oper_channel_type); | ||
| 3308 | ret = 0; | ||
| 3296 | } | 3309 | } |
| 3297 | rcu_read_unlock(); | 3310 | rcu_read_unlock(); |
| 3298 | 3311 | ||
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd641..931be419ab5a 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
| @@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
| 63 | enum ieee80211_chanctx_mode mode) | 63 | enum ieee80211_chanctx_mode mode) |
| 64 | { | 64 | { |
| 65 | struct ieee80211_chanctx *ctx; | 65 | struct ieee80211_chanctx *ctx; |
| 66 | u32 changed; | ||
| 66 | int err; | 67 | int err; |
| 67 | 68 | ||
| 68 | lockdep_assert_held(&local->chanctx_mtx); | 69 | lockdep_assert_held(&local->chanctx_mtx); |
| @@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
| 76 | ctx->conf.rx_chains_dynamic = 1; | 77 | ctx->conf.rx_chains_dynamic = 1; |
| 77 | ctx->mode = mode; | 78 | ctx->mode = mode; |
| 78 | 79 | ||
| 80 | /* acquire mutex to prevent idle from changing */ | ||
| 81 | mutex_lock(&local->mtx); | ||
| 82 | /* turn idle off *before* setting channel -- some drivers need that */ | ||
| 83 | changed = ieee80211_idle_off(local); | ||
| 84 | if (changed) | ||
| 85 | ieee80211_hw_config(local, changed); | ||
| 86 | |||
| 79 | if (!local->use_chanctx) { | 87 | if (!local->use_chanctx) { |
| 80 | local->_oper_channel_type = | 88 | local->_oper_channel_type = |
| 81 | cfg80211_get_chandef_type(chandef); | 89 | cfg80211_get_chandef_type(chandef); |
| @@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
| 85 | err = drv_add_chanctx(local, ctx); | 93 | err = drv_add_chanctx(local, ctx); |
| 86 | if (err) { | 94 | if (err) { |
| 87 | kfree(ctx); | 95 | kfree(ctx); |
| 88 | return ERR_PTR(err); | 96 | ctx = ERR_PTR(err); |
| 97 | |||
| 98 | ieee80211_recalc_idle(local); | ||
| 99 | goto out; | ||
| 89 | } | 100 | } |
| 90 | } | 101 | } |
| 91 | 102 | ||
| 103 | /* and keep the mutex held until the new chanctx is on the list */ | ||
| 92 | list_add_rcu(&ctx->list, &local->chanctx_list); | 104 | list_add_rcu(&ctx->list, &local->chanctx_list); |
| 93 | 105 | ||
| 94 | mutex_lock(&local->mtx); | 106 | out: |
| 95 | ieee80211_recalc_idle(local); | ||
| 96 | mutex_unlock(&local->mtx); | 107 | mutex_unlock(&local->mtx); |
| 97 | 108 | ||
| 98 | return ctx; | 109 | return ctx; |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bada..5672533a0832 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
| @@ -309,6 +309,7 @@ struct ieee80211_roc_work { | |||
| 309 | struct ieee80211_channel *chan; | 309 | struct ieee80211_channel *chan; |
| 310 | 310 | ||
| 311 | bool started, abort, hw_begun, notified; | 311 | bool started, abort, hw_begun, notified; |
| 312 | bool to_be_freed; | ||
| 312 | 313 | ||
| 313 | unsigned long hw_start_time; | 314 | unsigned long hw_start_time; |
| 314 | 315 | ||
| @@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local); | |||
| 1347 | void ieee80211_roc_setup(struct ieee80211_local *local); | 1348 | void ieee80211_roc_setup(struct ieee80211_local *local); |
| 1348 | void ieee80211_start_next_roc(struct ieee80211_local *local); | 1349 | void ieee80211_start_next_roc(struct ieee80211_local *local); |
| 1349 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); | 1350 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); |
| 1350 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); | 1351 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); |
| 1351 | void ieee80211_sw_roc_work(struct work_struct *work); | 1352 | void ieee80211_sw_roc_work(struct work_struct *work); |
| 1352 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); | 1353 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); |
| 1353 | 1354 | ||
| @@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
| 1361 | enum nl80211_iftype type); | 1362 | enum nl80211_iftype type); |
| 1362 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); | 1363 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
| 1363 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 1364 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
| 1365 | u32 ieee80211_idle_off(struct ieee80211_local *local); | ||
| 1364 | void ieee80211_recalc_idle(struct ieee80211_local *local); | 1366 | void ieee80211_recalc_idle(struct ieee80211_local *local); |
| 1365 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, | 1367 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, |
| 1366 | const int offset); | 1368 | const int offset); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 2c059e54e885..9ed49ad0380f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
| @@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
| 78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static u32 ieee80211_idle_off(struct ieee80211_local *local) | 81 | static u32 __ieee80211_idle_off(struct ieee80211_local *local) |
| 82 | { | 82 | { |
| 83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) | 83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) |
| 84 | return 0; | 84 | return 0; |
| @@ -87,7 +87,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local) | |||
| 87 | return IEEE80211_CONF_CHANGE_IDLE; | 87 | return IEEE80211_CONF_CHANGE_IDLE; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static u32 ieee80211_idle_on(struct ieee80211_local *local) | 90 | static u32 __ieee80211_idle_on(struct ieee80211_local *local) |
| 91 | { | 91 | { |
| 92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) | 92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) |
| 93 | return 0; | 93 | return 0; |
| @@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local) | |||
| 98 | return IEEE80211_CONF_CHANGE_IDLE; | 98 | return IEEE80211_CONF_CHANGE_IDLE; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | void ieee80211_recalc_idle(struct ieee80211_local *local) | 101 | static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, |
| 102 | bool force_active) | ||
| 102 | { | 103 | { |
| 103 | bool working = false, scanning, active; | 104 | bool working = false, scanning, active; |
| 104 | unsigned int led_trig_start = 0, led_trig_stop = 0; | 105 | unsigned int led_trig_start = 0, led_trig_stop = 0; |
| 105 | struct ieee80211_roc_work *roc; | 106 | struct ieee80211_roc_work *roc; |
| 106 | u32 change; | ||
| 107 | 107 | ||
| 108 | lockdep_assert_held(&local->mtx); | 108 | lockdep_assert_held(&local->mtx); |
| 109 | 109 | ||
| 110 | active = !list_empty(&local->chanctx_list); | 110 | active = force_active || |
| 111 | !list_empty(&local->chanctx_list) || | ||
| 112 | local->monitors; | ||
| 111 | 113 | ||
| 112 | if (!local->ops->remain_on_channel) { | 114 | if (!local->ops->remain_on_channel) { |
| 113 | list_for_each_entry(roc, &local->roc_list, list) { | 115 | list_for_each_entry(roc, &local->roc_list, list) { |
| @@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local) | |||
| 132 | ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); | 134 | ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); |
| 133 | 135 | ||
| 134 | if (working || scanning || active) | 136 | if (working || scanning || active) |
| 135 | change = ieee80211_idle_off(local); | 137 | return __ieee80211_idle_off(local); |
| 136 | else | 138 | return __ieee80211_idle_on(local); |
| 137 | change = ieee80211_idle_on(local); | 139 | } |
| 140 | |||
| 141 | u32 ieee80211_idle_off(struct ieee80211_local *local) | ||
| 142 | { | ||
| 143 | return __ieee80211_recalc_idle(local, true); | ||
| 144 | } | ||
| 145 | |||
| 146 | void ieee80211_recalc_idle(struct ieee80211_local *local) | ||
| 147 | { | ||
| 148 | u32 change = __ieee80211_recalc_idle(local, false); | ||
| 138 | if (change) | 149 | if (change) |
| 139 | ieee80211_hw_config(local, change); | 150 | ieee80211_hw_config(local, change); |
| 140 | } | 151 | } |
| @@ -349,21 +360,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) | |||
| 349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | 360 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) |
| 350 | { | 361 | { |
| 351 | struct ieee80211_sub_if_data *sdata; | 362 | struct ieee80211_sub_if_data *sdata; |
| 352 | int ret = 0; | 363 | int ret; |
| 353 | 364 | ||
| 354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 365 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
| 355 | return 0; | 366 | return 0; |
| 356 | 367 | ||
| 357 | mutex_lock(&local->iflist_mtx); | 368 | ASSERT_RTNL(); |
| 358 | 369 | ||
| 359 | if (local->monitor_sdata) | 370 | if (local->monitor_sdata) |
| 360 | goto out_unlock; | 371 | return 0; |
| 361 | 372 | ||
| 362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); | 373 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); |
| 363 | if (!sdata) { | 374 | if (!sdata) |
| 364 | ret = -ENOMEM; | 375 | return -ENOMEM; |
| 365 | goto out_unlock; | ||
| 366 | } | ||
| 367 | 376 | ||
| 368 | /* set up data */ | 377 | /* set up data */ |
| 369 | sdata->local = local; | 378 | sdata->local = local; |
| @@ -377,13 +386,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
| 377 | if (WARN_ON(ret)) { | 386 | if (WARN_ON(ret)) { |
| 378 | /* ok .. stupid driver, it asked for this! */ | 387 | /* ok .. stupid driver, it asked for this! */ |
| 379 | kfree(sdata); | 388 | kfree(sdata); |
| 380 | goto out_unlock; | 389 | return ret; |
| 381 | } | 390 | } |
| 382 | 391 | ||
| 383 | ret = ieee80211_check_queues(sdata); | 392 | ret = ieee80211_check_queues(sdata); |
| 384 | if (ret) { | 393 | if (ret) { |
| 385 | kfree(sdata); | 394 | kfree(sdata); |
| 386 | goto out_unlock; | 395 | return ret; |
| 387 | } | 396 | } |
| 388 | 397 | ||
| 389 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | 398 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, |
| @@ -391,13 +400,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
| 391 | if (ret) { | 400 | if (ret) { |
| 392 | drv_remove_interface(local, sdata); | 401 | drv_remove_interface(local, sdata); |
| 393 | kfree(sdata); | 402 | kfree(sdata); |
| 394 | goto out_unlock; | 403 | return ret; |
| 395 | } | 404 | } |
| 396 | 405 | ||
| 406 | mutex_lock(&local->iflist_mtx); | ||
| 397 | rcu_assign_pointer(local->monitor_sdata, sdata); | 407 | rcu_assign_pointer(local->monitor_sdata, sdata); |
| 398 | out_unlock: | ||
| 399 | mutex_unlock(&local->iflist_mtx); | 408 | mutex_unlock(&local->iflist_mtx); |
| 400 | return ret; | 409 | |
| 410 | return 0; | ||
| 401 | } | 411 | } |
| 402 | 412 | ||
| 403 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | 413 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) |
| @@ -407,14 +417,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
| 407 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 417 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
| 408 | return; | 418 | return; |
| 409 | 419 | ||
| 420 | ASSERT_RTNL(); | ||
| 421 | |||
| 410 | mutex_lock(&local->iflist_mtx); | 422 | mutex_lock(&local->iflist_mtx); |
| 411 | 423 | ||
| 412 | sdata = rcu_dereference_protected(local->monitor_sdata, | 424 | sdata = rcu_dereference_protected(local->monitor_sdata, |
| 413 | lockdep_is_held(&local->iflist_mtx)); | 425 | lockdep_is_held(&local->iflist_mtx)); |
| 414 | if (!sdata) | 426 | if (!sdata) { |
| 415 | goto out_unlock; | 427 | mutex_unlock(&local->iflist_mtx); |
| 428 | return; | ||
| 429 | } | ||
| 416 | 430 | ||
| 417 | rcu_assign_pointer(local->monitor_sdata, NULL); | 431 | rcu_assign_pointer(local->monitor_sdata, NULL); |
| 432 | mutex_unlock(&local->iflist_mtx); | ||
| 433 | |||
| 418 | synchronize_net(); | 434 | synchronize_net(); |
| 419 | 435 | ||
| 420 | ieee80211_vif_release_channel(sdata); | 436 | ieee80211_vif_release_channel(sdata); |
| @@ -422,8 +438,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
| 422 | drv_remove_interface(local, sdata); | 438 | drv_remove_interface(local, sdata); |
| 423 | 439 | ||
| 424 | kfree(sdata); | 440 | kfree(sdata); |
| 425 | out_unlock: | ||
| 426 | mutex_unlock(&local->iflist_mtx); | ||
| 427 | } | 441 | } |
| 428 | 442 | ||
| 429 | /* | 443 | /* |
| @@ -541,6 +555,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) | |||
| 541 | 555 | ||
| 542 | ieee80211_adjust_monitor_flags(sdata, 1); | 556 | ieee80211_adjust_monitor_flags(sdata, 1); |
| 543 | ieee80211_configure_filter(local); | 557 | ieee80211_configure_filter(local); |
| 558 | mutex_lock(&local->mtx); | ||
| 559 | ieee80211_recalc_idle(local); | ||
| 560 | mutex_unlock(&local->mtx); | ||
| 544 | 561 | ||
| 545 | netif_carrier_on(dev); | 562 | netif_carrier_on(dev); |
| 546 | break; | 563 | break; |
| @@ -812,6 +829,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, | |||
| 812 | 829 | ||
| 813 | ieee80211_adjust_monitor_flags(sdata, -1); | 830 | ieee80211_adjust_monitor_flags(sdata, -1); |
| 814 | ieee80211_configure_filter(local); | 831 | ieee80211_configure_filter(local); |
| 832 | mutex_lock(&local->mtx); | ||
| 833 | ieee80211_recalc_idle(local); | ||
| 834 | mutex_unlock(&local->mtx); | ||
| 815 | break; | 835 | break; |
| 816 | case NL80211_IFTYPE_P2P_DEVICE: | 836 | case NL80211_IFTYPE_P2P_DEVICE: |
| 817 | /* relies on synchronize_rcu() below */ | 837 | /* relies on synchronize_rcu() below */ |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7b..4749b3858695 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
| @@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
| 1060 | 1060 | ||
| 1061 | rcu_read_lock(); | 1061 | rcu_read_lock(); |
| 1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
| 1063 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1063 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
| 1064 | ieee80211_sdata_running(sdata)) | ||
| 1064 | ieee80211_queue_work(&local->hw, &sdata->work); | 1065 | ieee80211_queue_work(&local->hw, &sdata->work); |
| 1065 | rcu_read_unlock(); | 1066 | rcu_read_unlock(); |
| 1066 | } | 1067 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05f..346ad4cfb013 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, | |||
| 647 | our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & | 647 | our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) & |
| 648 | mask) >> shift; | 648 | mask) >> shift; |
| 649 | 649 | ||
| 650 | if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED) | ||
| 651 | continue; | ||
| 652 | |||
| 650 | switch (ap_mcs) { | 653 | switch (ap_mcs) { |
| 651 | default: | 654 | default: |
| 652 | if (our_mcs <= ap_mcs) | 655 | if (our_mcs <= ap_mcs) |
| @@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
| 3503 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 3506 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 3504 | 3507 | ||
| 3505 | /* | 3508 | /* |
| 3509 | * Stop timers before deleting work items, as timers | ||
| 3510 | * could race and re-add the work-items. They will be | ||
| 3511 | * re-established on connection. | ||
| 3512 | */ | ||
| 3513 | del_timer_sync(&ifmgd->conn_mon_timer); | ||
| 3514 | del_timer_sync(&ifmgd->bcn_mon_timer); | ||
| 3515 | |||
| 3516 | /* | ||
| 3506 | * we need to use atomic bitops for the running bits | 3517 | * we need to use atomic bitops for the running bits |
| 3507 | * only because both timers might fire at the same | 3518 | * only because both timers might fire at the same |
| 3508 | * time -- the code here is properly synchronised. | 3519 | * time -- the code here is properly synchronised. |
| @@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
| 3516 | if (del_timer_sync(&ifmgd->timer)) | 3527 | if (del_timer_sync(&ifmgd->timer)) |
| 3517 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 3528 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
| 3518 | 3529 | ||
| 3519 | cancel_work_sync(&ifmgd->chswitch_work); | ||
| 3520 | if (del_timer_sync(&ifmgd->chswitch_timer)) | 3530 | if (del_timer_sync(&ifmgd->chswitch_timer)) |
| 3521 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); | 3531 | set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); |
| 3522 | 3532 | cancel_work_sync(&ifmgd->chswitch_work); | |
| 3523 | /* these will just be re-established on connection */ | ||
| 3524 | del_timer_sync(&ifmgd->conn_mon_timer); | ||
| 3525 | del_timer_sync(&ifmgd->bcn_mon_timer); | ||
| 3526 | } | 3533 | } |
| 3527 | 3534 | ||
| 3528 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) | 3535 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) |
| @@ -3601,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | |||
| 3601 | 3608 | ||
| 3602 | /* Restart STA timers */ | 3609 | /* Restart STA timers */ |
| 3603 | rcu_read_lock(); | 3610 | rcu_read_lock(); |
| 3604 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
| 3605 | ieee80211_restart_sta_timer(sdata); | 3612 | if (ieee80211_sdata_running(sdata)) |
| 3613 | ieee80211_restart_sta_timer(sdata); | ||
| 3614 | } | ||
| 3606 | rcu_read_unlock(); | 3615 | rcu_read_unlock(); |
| 3607 | } | 3616 | } |
| 3608 | 3617 | ||
| @@ -3955,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
| 3955 | /* prep auth_data so we don't go into idle on disassoc */ | 3964 | /* prep auth_data so we don't go into idle on disassoc */ |
| 3956 | ifmgd->auth_data = auth_data; | 3965 | ifmgd->auth_data = auth_data; |
| 3957 | 3966 | ||
| 3958 | if (ifmgd->associated) | 3967 | if (ifmgd->associated) { |
| 3959 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 3968 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
| 3969 | |||
| 3970 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
| 3971 | WLAN_REASON_UNSPECIFIED, | ||
| 3972 | false, frame_buf); | ||
| 3973 | |||
| 3974 | __cfg80211_send_deauth(sdata->dev, frame_buf, | ||
| 3975 | sizeof(frame_buf)); | ||
| 3976 | } | ||
| 3960 | 3977 | ||
| 3961 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); | 3978 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); |
| 3962 | 3979 | ||
| @@ -4016,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
| 4016 | 4033 | ||
| 4017 | mutex_lock(&ifmgd->mtx); | 4034 | mutex_lock(&ifmgd->mtx); |
| 4018 | 4035 | ||
| 4019 | if (ifmgd->associated) | 4036 | if (ifmgd->associated) { |
| 4020 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 4037 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
| 4038 | |||
| 4039 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
| 4040 | WLAN_REASON_UNSPECIFIED, | ||
| 4041 | false, frame_buf); | ||
| 4042 | |||
| 4043 | __cfg80211_send_deauth(sdata->dev, frame_buf, | ||
| 4044 | sizeof(frame_buf)); | ||
| 4045 | } | ||
| 4021 | 4046 | ||
| 4022 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { | 4047 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { |
| 4023 | err = -EBUSY; | 4048 | err = -EBUSY; |
| @@ -4315,6 +4340,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) | |||
| 4315 | { | 4340 | { |
| 4316 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 4341 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
| 4317 | 4342 | ||
| 4343 | /* | ||
| 4344 | * Make sure some work items will not run after this, | ||
| 4345 | * they will not do anything but might not have been | ||
| 4346 | * cancelled when disconnecting. | ||
| 4347 | */ | ||
| 4348 | cancel_work_sync(&ifmgd->monitor_work); | ||
| 4349 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); | ||
| 4350 | cancel_work_sync(&ifmgd->request_smps_work); | ||
| 4351 | cancel_work_sync(&ifmgd->csa_connection_drop_work); | ||
| 4352 | cancel_work_sync(&ifmgd->chswitch_work); | ||
| 4353 | |||
| 4318 | mutex_lock(&ifmgd->mtx); | 4354 | mutex_lock(&ifmgd->mtx); |
| 4319 | if (ifmgd->assoc_data) | 4355 | if (ifmgd->assoc_data) |
| 4320 | ieee80211_destroy_assoc_data(sdata, false); | 4356 | ieee80211_destroy_assoc_data(sdata, false); |
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e821..430bd254e496 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
| @@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) | |||
| 297 | } | 297 | } |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | 300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) |
| 301 | { | 301 | { |
| 302 | struct ieee80211_roc_work *dep, *tmp; | 302 | struct ieee80211_roc_work *dep, *tmp; |
| 303 | 303 | ||
| 304 | if (WARN_ON(roc->to_be_freed)) | ||
| 305 | return; | ||
| 306 | |||
| 304 | /* was never transmitted */ | 307 | /* was never transmitted */ |
| 305 | if (roc->frame) { | 308 | if (roc->frame) { |
| 306 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, | 309 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, |
| @@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | |||
| 316 | GFP_KERNEL); | 319 | GFP_KERNEL); |
| 317 | 320 | ||
| 318 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) | 321 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) |
| 319 | ieee80211_roc_notify_destroy(dep); | 322 | ieee80211_roc_notify_destroy(dep, true); |
| 320 | 323 | ||
| 321 | kfree(roc); | 324 | if (free) |
| 325 | kfree(roc); | ||
| 326 | else | ||
| 327 | roc->to_be_freed = true; | ||
| 322 | } | 328 | } |
| 323 | 329 | ||
| 324 | void ieee80211_sw_roc_work(struct work_struct *work) | 330 | void ieee80211_sw_roc_work(struct work_struct *work) |
| @@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
| 331 | 337 | ||
| 332 | mutex_lock(&local->mtx); | 338 | mutex_lock(&local->mtx); |
| 333 | 339 | ||
| 340 | if (roc->to_be_freed) | ||
| 341 | goto out_unlock; | ||
| 342 | |||
| 334 | if (roc->abort) | 343 | if (roc->abort) |
| 335 | goto finish; | 344 | goto finish; |
| 336 | 345 | ||
| @@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
| 370 | finish: | 379 | finish: |
| 371 | list_del(&roc->list); | 380 | list_del(&roc->list); |
| 372 | started = roc->started; | 381 | started = roc->started; |
| 373 | ieee80211_roc_notify_destroy(roc); | 382 | ieee80211_roc_notify_destroy(roc, !roc->abort); |
| 374 | 383 | ||
| 375 | if (started) { | 384 | if (started) { |
| 376 | drv_flush(local, false); | 385 | drv_flush(local, false); |
| @@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
| 410 | 419 | ||
| 411 | list_del(&roc->list); | 420 | list_del(&roc->list); |
| 412 | 421 | ||
| 413 | ieee80211_roc_notify_destroy(roc); | 422 | ieee80211_roc_notify_destroy(roc, true); |
| 414 | 423 | ||
| 415 | /* if there's another roc, start it now */ | 424 | /* if there's another roc, start it now */ |
| 416 | ieee80211_start_next_roc(local); | 425 | ieee80211_start_next_roc(local); |
| @@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) | |||
| 460 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { | 469 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { |
| 461 | if (local->ops->remain_on_channel) { | 470 | if (local->ops->remain_on_channel) { |
| 462 | list_del(&roc->list); | 471 | list_del(&roc->list); |
| 463 | ieee80211_roc_notify_destroy(roc); | 472 | ieee80211_roc_notify_destroy(roc, true); |
| 464 | } else { | 473 | } else { |
| 465 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); | 474 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); |
| 466 | 475 | ||
| 467 | /* work will clean up etc */ | 476 | /* work will clean up etc */ |
| 468 | flush_delayed_work(&roc->work); | 477 | flush_delayed_work(&roc->work); |
| 478 | WARN_ON(!roc->to_be_freed); | ||
| 479 | kfree(roc); | ||
| 469 | } | 480 | } |
| 470 | } | 481 | } |
| 471 | 482 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b9..c6844ad080be 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) | |||
| 2675 | 2675 | ||
| 2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); | 2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); |
| 2677 | 2677 | ||
| 2678 | ieee80211_tx_skb(rx->sdata, nskb); | 2678 | if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { |
| 2679 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); | ||
| 2680 | |||
| 2681 | info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | | ||
| 2682 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK | | ||
| 2683 | IEEE80211_TX_CTL_NO_CCK_RATE; | ||
| 2684 | if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) | ||
| 2685 | info->hw_queue = | ||
| 2686 | local->hw.offchannel_tx_hw_queue; | ||
| 2687 | } | ||
| 2688 | |||
| 2689 | __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, | ||
| 2690 | status->band); | ||
| 2679 | } | 2691 | } |
| 2680 | dev_kfree_skb(rx->skb); | 2692 | dev_kfree_skb(rx->skb); |
| 2681 | return RX_QUEUED; | 2693 | return RX_QUEUED; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb50..238a0cca320e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
| 766 | struct ieee80211_local *local; | 766 | struct ieee80211_local *local; |
| 767 | struct ieee80211_sub_if_data *sdata; | 767 | struct ieee80211_sub_if_data *sdata; |
| 768 | int ret, i; | 768 | int ret, i; |
| 769 | bool have_key = false; | ||
| 769 | 770 | ||
| 770 | might_sleep(); | 771 | might_sleep(); |
| 771 | 772 | ||
| @@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
| 793 | list_del_rcu(&sta->list); | 794 | list_del_rcu(&sta->list); |
| 794 | 795 | ||
| 795 | mutex_lock(&local->key_mtx); | 796 | mutex_lock(&local->key_mtx); |
| 796 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 797 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { |
| 797 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); | 798 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); |
| 798 | if (sta->ptk) | 799 | have_key = true; |
| 800 | } | ||
| 801 | if (sta->ptk) { | ||
| 799 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); | 802 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); |
| 803 | have_key = true; | ||
| 804 | } | ||
| 800 | mutex_unlock(&local->key_mtx); | 805 | mutex_unlock(&local->key_mtx); |
| 801 | 806 | ||
| 807 | if (!have_key) | ||
| 808 | synchronize_net(); | ||
| 809 | |||
| 802 | sta->dead = true; | 810 | sta->dead = true; |
| 803 | 811 | ||
| 804 | local->num_sta--; | 812 | local->num_sta--; |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index de8548bf0a7f..8914d2d2881a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
| @@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local, | |||
| 1231 | if (local->queue_stop_reasons[q] || | 1231 | if (local->queue_stop_reasons[q] || |
| 1232 | (!txpending && !skb_queue_empty(&local->pending[q]))) { | 1232 | (!txpending && !skb_queue_empty(&local->pending[q]))) { |
| 1233 | if (unlikely(info->flags & | 1233 | if (unlikely(info->flags & |
| 1234 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK && | 1234 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) { |
| 1235 | local->queue_stop_reasons[q] & | 1235 | if (local->queue_stop_reasons[q] & |
| 1236 | ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) { | 1236 | ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) { |
| 1237 | /* | ||
| 1238 | * Drop off-channel frames if queues | ||
| 1239 | * are stopped for any reason other | ||
| 1240 | * than off-channel operation. Never | ||
| 1241 | * queue them. | ||
| 1242 | */ | ||
| 1243 | spin_unlock_irqrestore( | ||
| 1244 | &local->queue_stop_reason_lock, | ||
| 1245 | flags); | ||
| 1246 | ieee80211_purge_tx_queue(&local->hw, | ||
| 1247 | skbs); | ||
| 1248 | return true; | ||
| 1249 | } | ||
| 1250 | } else { | ||
| 1251 | |||
| 1237 | /* | 1252 | /* |
| 1238 | * Drop off-channel frames if queues are stopped | 1253 | * Since queue is stopped, queue up frames for |
| 1239 | * for any reason other than off-channel | 1254 | * later transmission from the tx-pending |
| 1240 | * operation. Never queue them. | 1255 | * tasklet when the queue is woken again. |
| 1241 | */ | 1256 | */ |
| 1242 | spin_unlock_irqrestore( | 1257 | if (txpending) |
| 1243 | &local->queue_stop_reason_lock, flags); | 1258 | skb_queue_splice_init(skbs, |
| 1244 | ieee80211_purge_tx_queue(&local->hw, skbs); | 1259 | &local->pending[q]); |
| 1245 | return true; | 1260 | else |
| 1261 | skb_queue_splice_tail_init(skbs, | ||
| 1262 | &local->pending[q]); | ||
| 1263 | |||
| 1264 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | ||
| 1265 | flags); | ||
| 1266 | return false; | ||
| 1246 | } | 1267 | } |
| 1247 | |||
| 1248 | /* | ||
| 1249 | * Since queue is stopped, queue up frames for later | ||
| 1250 | * transmission from the tx-pending tasklet when the | ||
| 1251 | * queue is woken again. | ||
| 1252 | */ | ||
| 1253 | if (txpending) | ||
| 1254 | skb_queue_splice_init(skbs, &local->pending[q]); | ||
| 1255 | else | ||
| 1256 | skb_queue_splice_tail_init(skbs, | ||
| 1257 | &local->pending[q]); | ||
| 1258 | |||
| 1259 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, | ||
| 1260 | flags); | ||
| 1261 | return false; | ||
| 1262 | } | 1268 | } |
| 1263 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 1269 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
| 1264 | 1270 | ||
| @@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
| 1844 | } | 1850 | } |
| 1845 | 1851 | ||
| 1846 | if (!is_multicast_ether_addr(skb->data)) { | 1852 | if (!is_multicast_ether_addr(skb->data)) { |
| 1853 | struct sta_info *next_hop; | ||
| 1854 | bool mpp_lookup = true; | ||
| 1855 | |||
| 1847 | mpath = mesh_path_lookup(sdata, skb->data); | 1856 | mpath = mesh_path_lookup(sdata, skb->data); |
| 1848 | if (!mpath) | 1857 | if (mpath) { |
| 1858 | mpp_lookup = false; | ||
| 1859 | next_hop = rcu_dereference(mpath->next_hop); | ||
| 1860 | if (!next_hop || | ||
| 1861 | !(mpath->flags & (MESH_PATH_ACTIVE | | ||
| 1862 | MESH_PATH_RESOLVING))) | ||
| 1863 | mpp_lookup = true; | ||
| 1864 | } | ||
| 1865 | |||
| 1866 | if (mpp_lookup) | ||
| 1849 | mppath = mpp_path_lookup(sdata, skb->data); | 1867 | mppath = mpp_path_lookup(sdata, skb->data); |
| 1868 | |||
| 1869 | if (mppath && mpath) | ||
| 1870 | mesh_path_del(mpath->sdata, mpath->dst); | ||
| 1850 | } | 1871 | } |
| 1851 | 1872 | ||
| 1852 | /* | 1873 | /* |
| @@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, | |||
| 2350 | if (local->tim_in_locked_section) { | 2371 | if (local->tim_in_locked_section) { |
| 2351 | __ieee80211_beacon_add_tim(sdata, ps, skb); | 2372 | __ieee80211_beacon_add_tim(sdata, ps, skb); |
| 2352 | } else { | 2373 | } else { |
| 2353 | spin_lock(&local->tim_lock); | 2374 | spin_lock_bh(&local->tim_lock); |
| 2354 | __ieee80211_beacon_add_tim(sdata, ps, skb); | 2375 | __ieee80211_beacon_add_tim(sdata, ps, skb); |
| 2355 | spin_unlock(&local->tim_lock); | 2376 | spin_unlock_bh(&local->tim_lock); |
| 2356 | } | 2377 | } |
| 2357 | 2378 | ||
| 2358 | return 0; | 2379 | return 0; |
| @@ -2724,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
| 2724 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | 2745 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); |
| 2725 | } | 2746 | } |
| 2726 | 2747 | ||
| 2727 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); | 2748 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
| 2749 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); | ||
| 2728 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) | 2750 | if (!ieee80211_tx_prepare(sdata, &tx, skb)) |
| 2729 | break; | 2751 | break; |
| 2730 | dev_kfree_skb_any(skb); | 2752 | dev_kfree_skb_any(skb); |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0f92dc24cb89..d7df6ac2c6f1 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
| @@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set, | |||
| 339 | nla_put_failure: | 339 | nla_put_failure: |
| 340 | nla_nest_cancel(skb, nested); | 340 | nla_nest_cancel(skb, nested); |
| 341 | ipset_nest_end(skb, atd); | 341 | ipset_nest_end(skb, atd); |
| 342 | return -EMSGSIZE; | 342 | if (unlikely(id == first)) { |
| 343 | cb->args[2] = 0; | ||
| 344 | return -EMSGSIZE; | ||
| 345 | } | ||
| 346 | return 0; | ||
| 343 | } | 347 | } |
| 344 | 348 | ||
| 345 | static int | 349 | static int |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index f82b2e606cfd..1ba9dbc0e107 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb, | |||
| 1470 | if (ret == -EAGAIN) | 1470 | if (ret == -EAGAIN) |
| 1471 | ret = 1; | 1471 | ret = 1; |
| 1472 | 1472 | ||
| 1473 | return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST; | 1473 | return (ret < 0 && ret != -ENOTEMPTY) ? ret : |
| 1474 | ret > 0 ? 0 : -IPSET_ERR_EXIST; | ||
| 1474 | } | 1475 | } |
| 1475 | 1476 | ||
| 1476 | /* Get headed data of a set */ | 1477 | /* Get headed data of a set */ |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index f2627226a087..10a30b4fc7db 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
| @@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags) | |||
| 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline void | ||
| 108 | hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) | ||
| 109 | { | ||
| 110 | if (dst->nomatch) { | ||
| 111 | *flags = IPSET_FLAG_NOMATCH; | ||
| 112 | dst->nomatch = 0; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 107 | static inline int | 116 | static inline int |
| 108 | hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) | 117 | hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) |
| 109 | { | 118 | { |
| @@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags) | |||
| 414 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 423 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 415 | } | 424 | } |
| 416 | 425 | ||
| 426 | static inline void | ||
| 427 | hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags) | ||
| 428 | { | ||
| 429 | if (dst->nomatch) { | ||
| 430 | *flags = IPSET_FLAG_NOMATCH; | ||
| 431 | dst->nomatch = 0; | ||
| 432 | } | ||
| 433 | } | ||
| 434 | |||
| 417 | static inline int | 435 | static inline int |
| 418 | hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) | 436 | hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) |
| 419 | { | 437 | { |
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4b677cf6bf7d..d6a59154d710 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c | |||
| @@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst, | |||
| 87 | static inline void | 87 | static inline void |
| 88 | hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) | 88 | hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) |
| 89 | { | 89 | { |
| 90 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 90 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 91 | } | ||
| 92 | |||
| 93 | static inline void | ||
| 94 | hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) | ||
| 95 | { | ||
| 96 | if (dst->nomatch) { | ||
| 97 | *flags = IPSET_FLAG_NOMATCH; | ||
| 98 | dst->nomatch = 0; | ||
| 99 | } | ||
| 91 | } | 100 | } |
| 92 | 101 | ||
| 93 | static inline int | 102 | static inline int |
| @@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst, | |||
| 308 | static inline void | 317 | static inline void |
| 309 | hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags) | 318 | hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags) |
| 310 | { | 319 | { |
| 311 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 320 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 321 | } | ||
| 322 | |||
| 323 | static inline void | ||
| 324 | hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags) | ||
| 325 | { | ||
| 326 | if (dst->nomatch) { | ||
| 327 | *flags = IPSET_FLAG_NOMATCH; | ||
| 328 | dst->nomatch = 0; | ||
| 329 | } | ||
| 312 | } | 330 | } |
| 313 | 331 | ||
| 314 | static inline int | 332 | static inline int |
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 6ba985f1c96f..f2b0a3c30130 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
| @@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst, | |||
| 198 | static inline void | 198 | static inline void |
| 199 | hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) | 199 | hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) |
| 200 | { | 200 | { |
| 201 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 201 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 202 | } | ||
| 203 | |||
| 204 | static inline void | ||
| 205 | hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) | ||
| 206 | { | ||
| 207 | if (dst->nomatch) { | ||
| 208 | *flags = IPSET_FLAG_NOMATCH; | ||
| 209 | dst->nomatch = 0; | ||
| 210 | } | ||
| 202 | } | 211 | } |
| 203 | 212 | ||
| 204 | static inline int | 213 | static inline int |
| @@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst, | |||
| 494 | static inline void | 503 | static inline void |
| 495 | hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) | 504 | hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) |
| 496 | { | 505 | { |
| 497 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 506 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 498 | } | 507 | } |
| 499 | 508 | ||
| 500 | static inline int | 509 | static inline int |
| @@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem) | |||
| 504 | } | 513 | } |
| 505 | 514 | ||
| 506 | static inline void | 515 | static inline void |
| 516 | hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) | ||
| 517 | { | ||
| 518 | if (dst->nomatch) { | ||
| 519 | *flags = IPSET_FLAG_NOMATCH; | ||
| 520 | dst->nomatch = 0; | ||
| 521 | } | ||
| 522 | } | ||
| 523 | |||
| 524 | static inline void | ||
| 507 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) | 525 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) |
| 508 | { | 526 | { |
| 509 | elem->elem = 0; | 527 | elem->elem = 0; |
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index af20c0c5ced2..349deb672a2d 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c | |||
| @@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags) | |||
| 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline void | ||
| 108 | hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) | ||
| 109 | { | ||
| 110 | if (dst->nomatch) { | ||
| 111 | *flags = IPSET_FLAG_NOMATCH; | ||
| 112 | dst->nomatch = 0; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 107 | static inline int | 116 | static inline int |
| 108 | hash_netport4_data_match(const struct hash_netport4_elem *elem) | 117 | hash_netport4_data_match(const struct hash_netport4_elem *elem) |
| 109 | { | 118 | { |
| @@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags) | |||
| 375 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 384 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
| 376 | } | 385 | } |
| 377 | 386 | ||
| 387 | static inline void | ||
| 388 | hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags) | ||
| 389 | { | ||
| 390 | if (dst->nomatch) { | ||
| 391 | *flags = IPSET_FLAG_NOMATCH; | ||
| 392 | dst->nomatch = 0; | ||
| 393 | } | ||
| 394 | } | ||
| 395 | |||
| 378 | static inline int | 396 | static inline int |
| 379 | hash_netport6_data_match(const struct hash_netport6_elem *elem) | 397 | hash_netport6_data_match(const struct hash_netport6_elem *elem) |
| 380 | { | 398 | { |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 8371c2bac2e4..09c744aa8982 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
| @@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 174 | { | 174 | { |
| 175 | const struct set_elem *e = list_set_elem(map, i); | 175 | const struct set_elem *e = list_set_elem(map, i); |
| 176 | 176 | ||
| 177 | if (i == map->size - 1 && e->id != IPSET_INVALID_ID) | 177 | if (e->id != IPSET_INVALID_ID) { |
| 178 | /* Last element replaced: e.g. add new,before,last */ | 178 | const struct set_elem *x = list_set_elem(map, map->size - 1); |
| 179 | ip_set_put_byindex(e->id); | 179 | |
| 180 | /* Last element replaced or pushed off */ | ||
| 181 | if (x->id != IPSET_INVALID_ID) | ||
| 182 | ip_set_put_byindex(x->id); | ||
| 183 | } | ||
| 180 | if (with_timeout(map->timeout)) | 184 | if (with_timeout(map->timeout)) |
| 181 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); | 185 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); |
| 182 | else | 186 | else |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a59..61f49d241712 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
| 1394 | skb_reset_network_header(skb); | 1394 | skb_reset_network_header(skb); |
| 1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", | 1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", |
| 1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); | 1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); |
| 1397 | rcu_read_lock(); | ||
| 1398 | ipv4_update_pmtu(skb, dev_net(skb->dev), | 1397 | ipv4_update_pmtu(skb, dev_net(skb->dev), |
| 1399 | mtu, 0, 0, 0, 0); | 1398 | mtu, 0, 0, 0, 0); |
| 1400 | rcu_read_unlock(); | ||
| 1401 | /* Client uses PMTUD? */ | 1399 | /* Client uses PMTUD? */ |
| 1402 | if (!(cih->frag_off & htons(IP_DF))) | 1400 | if (!(cih->frag_off & htons(IP_DF))) |
| 1403 | goto ignore_ipip; | 1401 | goto ignore_ipip; |
| @@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
| 1577 | } | 1575 | } |
| 1578 | /* ipvs enabled in this netns ? */ | 1576 | /* ipvs enabled in this netns ? */ |
| 1579 | net = skb_net(skb); | 1577 | net = skb_net(skb); |
| 1580 | if (!net_ipvs(net)->enable) | 1578 | ipvs = net_ipvs(net); |
| 1579 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
| 1581 | return NF_ACCEPT; | 1580 | return NF_ACCEPT; |
| 1582 | 1581 | ||
| 1583 | ip_vs_fill_iph_skb(af, skb, &iph); | 1582 | ip_vs_fill_iph_skb(af, skb, &iph); |
| @@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
| 1654 | } | 1653 | } |
| 1655 | 1654 | ||
| 1656 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); | 1655 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); |
| 1657 | ipvs = net_ipvs(net); | ||
| 1658 | /* Check the server status */ | 1656 | /* Check the server status */ |
| 1659 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 1657 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
| 1660 | /* the destination server is not available */ | 1658 | /* the destination server is not available */ |
| @@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
| 1815 | { | 1813 | { |
| 1816 | int r; | 1814 | int r; |
| 1817 | struct net *net; | 1815 | struct net *net; |
| 1816 | struct netns_ipvs *ipvs; | ||
| 1818 | 1817 | ||
| 1819 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) | 1818 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
| 1820 | return NF_ACCEPT; | 1819 | return NF_ACCEPT; |
| 1821 | 1820 | ||
| 1822 | /* ipvs enabled in this netns ? */ | 1821 | /* ipvs enabled in this netns ? */ |
| 1823 | net = skb_net(skb); | 1822 | net = skb_net(skb); |
| 1824 | if (!net_ipvs(net)->enable) | 1823 | ipvs = net_ipvs(net); |
| 1824 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
| 1825 | return NF_ACCEPT; | 1825 | return NF_ACCEPT; |
| 1826 | 1826 | ||
| 1827 | return ip_vs_in_icmp(skb, &r, hooknum); | 1827 | return ip_vs_in_icmp(skb, &r, hooknum); |
| @@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
| 1835 | { | 1835 | { |
| 1836 | int r; | 1836 | int r; |
| 1837 | struct net *net; | 1837 | struct net *net; |
| 1838 | struct netns_ipvs *ipvs; | ||
| 1838 | struct ip_vs_iphdr iphdr; | 1839 | struct ip_vs_iphdr iphdr; |
| 1839 | 1840 | ||
| 1840 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); | 1841 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); |
| @@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
| 1843 | 1844 | ||
| 1844 | /* ipvs enabled in this netns ? */ | 1845 | /* ipvs enabled in this netns ? */ |
| 1845 | net = skb_net(skb); | 1846 | net = skb_net(skb); |
| 1846 | if (!net_ipvs(net)->enable) | 1847 | ipvs = net_ipvs(net); |
| 1848 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
| 1847 | return NF_ACCEPT; | 1849 | return NF_ACCEPT; |
| 1848 | 1850 | ||
| 1849 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); | 1851 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c68198bf9128..9e2d1cccd1eb 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = { | |||
| 1808 | .mode = 0644, | 1808 | .mode = 0644, |
| 1809 | .proc_handler = proc_dointvec, | 1809 | .proc_handler = proc_dointvec, |
| 1810 | }, | 1810 | }, |
| 1811 | { | ||
| 1812 | .procname = "backup_only", | ||
| 1813 | .maxlen = sizeof(int), | ||
| 1814 | .mode = 0644, | ||
| 1815 | .proc_handler = proc_dointvec, | ||
| 1816 | }, | ||
| 1811 | #ifdef CONFIG_IP_VS_DEBUG | 1817 | #ifdef CONFIG_IP_VS_DEBUG |
| 1812 | { | 1818 | { |
| 1813 | .procname = "debug_level", | 1819 | .procname = "debug_level", |
| @@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) | |||
| 3741 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; | 3747 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; |
| 3742 | ipvs->sysctl_pmtu_disc = 1; | 3748 | ipvs->sysctl_pmtu_disc = 1; |
| 3743 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; | 3749 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; |
| 3750 | tbl[idx++].data = &ipvs->sysctl_backup_only; | ||
| 3744 | 3751 | ||
| 3745 | 3752 | ||
| 3746 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); | 3753 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); |
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index ae8ec6f27688..cd1d7298f7ba 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
| @@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
| 906 | sctp_chunkhdr_t _sctpch, *sch; | 906 | sctp_chunkhdr_t _sctpch, *sch; |
| 907 | unsigned char chunk_type; | 907 | unsigned char chunk_type; |
| 908 | int event, next_state; | 908 | int event, next_state; |
| 909 | int ihl; | 909 | int ihl, cofs; |
| 910 | 910 | ||
| 911 | #ifdef CONFIG_IP_VS_IPV6 | 911 | #ifdef CONFIG_IP_VS_IPV6 |
| 912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); | 912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); |
| @@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
| 914 | ihl = ip_hdrlen(skb); | 914 | ihl = ip_hdrlen(skb); |
| 915 | #endif | 915 | #endif |
| 916 | 916 | ||
| 917 | sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), | 917 | cofs = ihl + sizeof(sctp_sctphdr_t); |
| 918 | sizeof(_sctpch), &_sctpch); | 918 | sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); |
| 919 | if (sch == NULL) | 919 | if (sch == NULL) |
| 920 | return; | 920 | return; |
| 921 | 921 | ||
| @@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
| 933 | */ | 933 | */ |
| 934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || | 934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || |
| 935 | (sch->type == SCTP_CID_COOKIE_ACK)) { | 935 | (sch->type == SCTP_CID_COOKIE_ACK)) { |
| 936 | sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + | 936 | int clen = ntohs(sch->length); |
| 937 | sch->length), sizeof(_sctpch), &_sctpch); | 937 | |
| 938 | if (sch) { | 938 | if (clen >= sizeof(sctp_chunkhdr_t)) { |
| 939 | if (sch->type == SCTP_CID_ABORT) | 939 | sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), |
| 940 | sizeof(_sctpch), &_sctpch); | ||
| 941 | if (sch && sch->type == SCTP_CID_ABORT) | ||
| 940 | chunk_type = sch->type; | 942 | chunk_type = sch->type; |
| 941 | } | 943 | } |
| 942 | } | 944 | } |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a9740bd6fe54..94b4b9853f60 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
| @@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, | |||
| 339 | { | 339 | { |
| 340 | const struct nf_conn_help *help; | 340 | const struct nf_conn_help *help; |
| 341 | const struct nf_conntrack_helper *helper; | 341 | const struct nf_conntrack_helper *helper; |
| 342 | struct va_format vaf; | ||
| 343 | va_list args; | ||
| 344 | |||
| 345 | va_start(args, fmt); | ||
| 346 | |||
| 347 | vaf.fmt = fmt; | ||
| 348 | vaf.va = &args; | ||
| 342 | 349 | ||
| 343 | /* Called from the helper function, this call never fails */ | 350 | /* Called from the helper function, this call never fails */ |
| 344 | help = nfct_help(ct); | 351 | help = nfct_help(ct); |
| @@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, | |||
| 347 | helper = rcu_dereference(help->helper); | 354 | helper = rcu_dereference(help->helper); |
| 348 | 355 | ||
| 349 | nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, | 356 | nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, |
| 350 | "nf_ct_%s: dropping packet: %s ", helper->name, fmt); | 357 | "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); |
| 358 | |||
| 359 | va_end(args); | ||
| 351 | } | 360 | } |
| 352 | EXPORT_SYMBOL_GPL(nf_ct_helper_log); | 361 | EXPORT_SYMBOL_GPL(nf_ct_helper_log); |
| 353 | 362 | ||
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 432f95780003..ba65b2041eb4 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
| @@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
| 969 | { | 969 | { |
| 970 | int ret; | 970 | int ret; |
| 971 | 971 | ||
| 972 | ret = register_pernet_subsys(&dccp_net_ops); | ||
| 973 | if (ret < 0) | ||
| 974 | goto out_pernet; | ||
| 975 | |||
| 972 | ret = nf_ct_l4proto_register(&dccp_proto4); | 976 | ret = nf_ct_l4proto_register(&dccp_proto4); |
| 973 | if (ret < 0) | 977 | if (ret < 0) |
| 974 | goto out_dccp4; | 978 | goto out_dccp4; |
| @@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
| 977 | if (ret < 0) | 981 | if (ret < 0) |
| 978 | goto out_dccp6; | 982 | goto out_dccp6; |
| 979 | 983 | ||
| 980 | ret = register_pernet_subsys(&dccp_net_ops); | ||
| 981 | if (ret < 0) | ||
| 982 | goto out_pernet; | ||
| 983 | |||
| 984 | return 0; | 984 | return 0; |
| 985 | out_pernet: | ||
| 986 | nf_ct_l4proto_unregister(&dccp_proto6); | ||
| 987 | out_dccp6: | 985 | out_dccp6: |
| 988 | nf_ct_l4proto_unregister(&dccp_proto4); | 986 | nf_ct_l4proto_unregister(&dccp_proto4); |
| 989 | out_dccp4: | 987 | out_dccp4: |
| 988 | unregister_pernet_subsys(&dccp_net_ops); | ||
| 989 | out_pernet: | ||
| 990 | return ret; | 990 | return ret; |
| 991 | } | 991 | } |
| 992 | 992 | ||
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bd7d01d9c7e7..155ce9f8a0db 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
| @@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void) | |||
| 420 | { | 420 | { |
| 421 | int ret; | 421 | int ret; |
| 422 | 422 | ||
| 423 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
| 424 | if (ret < 0) | ||
| 425 | goto out_gre4; | ||
| 426 | |||
| 427 | ret = register_pernet_subsys(&proto_gre_net_ops); | 423 | ret = register_pernet_subsys(&proto_gre_net_ops); |
| 428 | if (ret < 0) | 424 | if (ret < 0) |
| 429 | goto out_pernet; | 425 | goto out_pernet; |
| 430 | 426 | ||
| 427 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
| 428 | if (ret < 0) | ||
| 429 | goto out_gre4; | ||
| 430 | |||
| 431 | return 0; | 431 | return 0; |
| 432 | out_pernet: | ||
| 433 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); | ||
| 434 | out_gre4: | 432 | out_gre4: |
| 433 | unregister_pernet_subsys(&proto_gre_net_ops); | ||
| 434 | out_pernet: | ||
| 435 | return ret; | 435 | return ret; |
| 436 | } | 436 | } |
| 437 | 437 | ||
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 480f616d5936..ec83536def9a 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
| @@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
| 888 | { | 888 | { |
| 889 | int ret; | 889 | int ret; |
| 890 | 890 | ||
| 891 | ret = register_pernet_subsys(&sctp_net_ops); | ||
| 892 | if (ret < 0) | ||
| 893 | goto out_pernet; | ||
| 894 | |||
| 891 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); | 895 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); |
| 892 | if (ret < 0) | 896 | if (ret < 0) |
| 893 | goto out_sctp4; | 897 | goto out_sctp4; |
| @@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
| 896 | if (ret < 0) | 900 | if (ret < 0) |
| 897 | goto out_sctp6; | 901 | goto out_sctp6; |
| 898 | 902 | ||
| 899 | ret = register_pernet_subsys(&sctp_net_ops); | ||
| 900 | if (ret < 0) | ||
| 901 | goto out_pernet; | ||
| 902 | |||
| 903 | return 0; | 903 | return 0; |
| 904 | out_pernet: | ||
| 905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6); | ||
| 906 | out_sctp6: | 904 | out_sctp6: |
| 907 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); | 905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
| 908 | out_sctp4: | 906 | out_sctp4: |
| 907 | unregister_pernet_subsys(&sctp_net_ops); | ||
| 908 | out_pernet: | ||
| 909 | return ret; | 909 | return ret; |
| 910 | } | 910 | } |
| 911 | 911 | ||
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 157489581c31..ca969f6273f7 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
| @@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
| 371 | { | 371 | { |
| 372 | int ret; | 372 | int ret; |
| 373 | 373 | ||
| 374 | ret = register_pernet_subsys(&udplite_net_ops); | ||
| 375 | if (ret < 0) | ||
| 376 | goto out_pernet; | ||
| 377 | |||
| 374 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); | 378 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); |
| 375 | if (ret < 0) | 379 | if (ret < 0) |
| 376 | goto out_udplite4; | 380 | goto out_udplite4; |
| @@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
| 379 | if (ret < 0) | 383 | if (ret < 0) |
| 380 | goto out_udplite6; | 384 | goto out_udplite6; |
| 381 | 385 | ||
| 382 | ret = register_pernet_subsys(&udplite_net_ops); | ||
| 383 | if (ret < 0) | ||
| 384 | goto out_pernet; | ||
| 385 | |||
| 386 | return 0; | 386 | return 0; |
| 387 | out_pernet: | ||
| 388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); | ||
| 389 | out_udplite6: | 387 | out_udplite6: |
| 390 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); | 388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); |
| 391 | out_udplite4: | 389 | out_udplite4: |
| 390 | unregister_pernet_subsys(&udplite_net_ops); | ||
| 391 | out_pernet: | ||
| 392 | return ret; | 392 | return ret; |
| 393 | } | 393 | } |
| 394 | 394 | ||
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0e7d423324c3..e0c4373b4747 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | |||
| 1593 | end += strlen("\r\n\r\n") + clen; | 1593 | end += strlen("\r\n\r\n") + clen; |
| 1594 | 1594 | ||
| 1595 | msglen = origlen = end - dptr; | 1595 | msglen = origlen = end - dptr; |
| 1596 | if (msglen > datalen) { | 1596 | if (msglen > datalen) |
| 1597 | nf_ct_helper_log(skb, ct, "incomplete/bad SIP message"); | 1597 | return NF_ACCEPT; |
| 1598 | return NF_DROP; | ||
| 1599 | } | ||
| 1600 | 1598 | ||
| 1601 | ret = process_sip_msg(skb, ct, protoff, dataoff, | 1599 | ret = process_sip_msg(skb, ct, protoff, dataoff, |
| 1602 | &dptr, &msglen); | 1600 | &dptr, &msglen); |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1c..fedee3943661 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void) | |||
| 568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); | 568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); |
| 569 | if (!nf_ct_netfilter_header) { | 569 | if (!nf_ct_netfilter_header) { |
| 570 | pr_err("nf_conntrack: can't register to sysctl.\n"); | 570 | pr_err("nf_conntrack: can't register to sysctl.\n"); |
| 571 | ret = -ENOMEM; | ||
| 571 | goto out_sysctl; | 572 | goto out_sysctl; |
| 572 | } | 573 | } |
| 573 | #endif | 574 | #endif |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 8d5769c6d16e..ad24be070e53 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
| @@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet); | |||
| 467 | struct nf_nat_proto_clean { | 467 | struct nf_nat_proto_clean { |
| 468 | u8 l3proto; | 468 | u8 l3proto; |
| 469 | u8 l4proto; | 469 | u8 l4proto; |
| 470 | bool hash; | ||
| 471 | }; | 470 | }; |
| 472 | 471 | ||
| 473 | /* Clear NAT section of all conntracks, in case we're loaded again. */ | 472 | /* kill conntracks with affected NAT section */ |
| 474 | static int nf_nat_proto_clean(struct nf_conn *i, void *data) | 473 | static int nf_nat_proto_remove(struct nf_conn *i, void *data) |
| 475 | { | 474 | { |
| 476 | const struct nf_nat_proto_clean *clean = data; | 475 | const struct nf_nat_proto_clean *clean = data; |
| 477 | struct nf_conn_nat *nat = nfct_nat(i); | 476 | struct nf_conn_nat *nat = nfct_nat(i); |
| 478 | 477 | ||
| 479 | if (!nat) | 478 | if (!nat) |
| 480 | return 0; | 479 | return 0; |
| 481 | if (!(i->status & IPS_SRC_NAT_DONE)) | 480 | |
| 482 | return 0; | ||
| 483 | if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || | 481 | if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || |
| 484 | (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) | 482 | (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) |
| 485 | return 0; | 483 | return 0; |
| 486 | 484 | ||
| 487 | if (clean->hash) { | 485 | return i->status & IPS_NAT_MASK ? 1 : 0; |
| 488 | spin_lock_bh(&nf_nat_lock); | ||
| 489 | hlist_del_rcu(&nat->bysource); | ||
| 490 | spin_unlock_bh(&nf_nat_lock); | ||
| 491 | } else { | ||
| 492 | memset(nat, 0, sizeof(*nat)); | ||
| 493 | i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | | ||
| 494 | IPS_SEQ_ADJUST); | ||
| 495 | } | ||
| 496 | return 0; | ||
| 497 | } | 486 | } |
| 498 | 487 | ||
| 499 | static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) | 488 | static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) |
| @@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) | |||
| 505 | struct net *net; | 494 | struct net *net; |
| 506 | 495 | ||
| 507 | rtnl_lock(); | 496 | rtnl_lock(); |
| 508 | /* Step 1 - remove from bysource hash */ | ||
| 509 | clean.hash = true; | ||
| 510 | for_each_net(net) | 497 | for_each_net(net) |
| 511 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | 498 | nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); |
| 512 | synchronize_rcu(); | ||
| 513 | |||
| 514 | /* Step 2 - clean NAT section */ | ||
| 515 | clean.hash = false; | ||
| 516 | for_each_net(net) | ||
| 517 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | ||
| 518 | rtnl_unlock(); | 499 | rtnl_unlock(); |
| 519 | } | 500 | } |
| 520 | 501 | ||
| @@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto) | |||
| 526 | struct net *net; | 507 | struct net *net; |
| 527 | 508 | ||
| 528 | rtnl_lock(); | 509 | rtnl_lock(); |
| 529 | /* Step 1 - remove from bysource hash */ | ||
| 530 | clean.hash = true; | ||
| 531 | for_each_net(net) | ||
| 532 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | ||
| 533 | synchronize_rcu(); | ||
| 534 | 510 | ||
| 535 | /* Step 2 - clean NAT section */ | ||
| 536 | clean.hash = false; | ||
| 537 | for_each_net(net) | 511 | for_each_net(net) |
| 538 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | 512 | nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); |
| 539 | rtnl_unlock(); | 513 | rtnl_unlock(); |
| 540 | } | 514 | } |
| 541 | 515 | ||
| @@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) | |||
| 773 | { | 747 | { |
| 774 | struct nf_nat_proto_clean clean = {}; | 748 | struct nf_nat_proto_clean clean = {}; |
| 775 | 749 | ||
| 776 | nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean); | 750 | nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); |
| 777 | synchronize_rcu(); | 751 | synchronize_rcu(); |
| 778 | nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); | 752 | nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); |
| 779 | } | 753 | } |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec251712..0b1b32cda307 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
| @@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id) | |||
| 62 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(nfnl_unlock); | 63 | EXPORT_SYMBOL_GPL(nfnl_unlock); |
| 64 | 64 | ||
| 65 | static struct mutex *nfnl_get_lock(__u8 subsys_id) | ||
| 66 | { | ||
| 67 | return &table[subsys_id].mutex; | ||
| 68 | } | ||
| 69 | |||
| 70 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) | 65 | int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) |
| 71 | { | 66 | { |
| 72 | nfnl_lock(n->subsys_id); | 67 | nfnl_lock(n->subsys_id); |
| @@ -199,7 +194,7 @@ replay: | |||
| 199 | rcu_read_unlock(); | 194 | rcu_read_unlock(); |
| 200 | nfnl_lock(subsys_id); | 195 | nfnl_lock(subsys_id); |
| 201 | if (rcu_dereference_protected(table[subsys_id].subsys, | 196 | if (rcu_dereference_protected(table[subsys_id].subsys, |
| 202 | lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || | 197 | lockdep_is_held(&table[subsys_id].mutex)) != ss || |
| 203 | nfnetlink_find_client(type, ss) != nc) | 198 | nfnetlink_find_client(type, ss) != nc) |
| 204 | err = -EAGAIN; | 199 | err = -EAGAIN; |
| 205 | else if (nc->call) | 200 | else if (nc->call) |
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4c..dc3fd5d44464 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
| @@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, | |||
| 49 | return -EINVAL; | 49 | return -EINVAL; |
| 50 | 50 | ||
| 51 | acct_name = nla_data(tb[NFACCT_NAME]); | 51 | acct_name = nla_data(tb[NFACCT_NAME]); |
| 52 | if (strlen(acct_name) == 0) | ||
| 53 | return -EINVAL; | ||
| 52 | 54 | ||
| 53 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { | 55 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { |
| 54 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) | 56 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 858fd52c1040..42680b2baa11 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
| @@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid) | |||
| 112 | inst->queue_num = queue_num; | 112 | inst->queue_num = queue_num; |
| 113 | inst->peer_portid = portid; | 113 | inst->peer_portid = portid; |
| 114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
| 115 | inst->copy_range = 0xfffff; | 115 | inst->copy_range = 0xffff; |
| 116 | inst->copy_mode = NFQNL_COPY_NONE; | 116 | inst->copy_mode = NFQNL_COPY_NONE; |
| 117 | spin_lock_init(&inst->lock); | 117 | spin_lock_init(&inst->lock); |
| 118 | INIT_LIST_HEAD(&inst->queue_list); | 118 | INIT_LIST_HEAD(&inst->queue_list); |
| @@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void) | |||
| 1062 | 1062 | ||
| 1063 | #ifdef CONFIG_PROC_FS | 1063 | #ifdef CONFIG_PROC_FS |
| 1064 | if (!proc_create("nfnetlink_queue", 0440, | 1064 | if (!proc_create("nfnetlink_queue", 0440, |
| 1065 | proc_net_netfilter, &nfqnl_file_ops)) | 1065 | proc_net_netfilter, &nfqnl_file_ops)) { |
| 1066 | status = -ENOMEM; | ||
| 1066 | goto cleanup_subsys; | 1067 | goto cleanup_subsys; |
| 1068 | } | ||
| 1067 | #endif | 1069 | #endif |
| 1068 | 1070 | ||
| 1069 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1071 | register_netdevice_notifier(&nfqnl_dev_notifier); |
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f3..3228d7f24eb4 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c | |||
| @@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 124 | const struct xt_audit_info *info = par->targinfo; | 124 | const struct xt_audit_info *info = par->targinfo; |
| 125 | struct audit_buffer *ab; | 125 | struct audit_buffer *ab; |
| 126 | 126 | ||
| 127 | if (audit_enabled == 0) | ||
| 128 | goto errout; | ||
| 129 | |||
| 127 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); | 130 | ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT); |
| 128 | if (ab == NULL) | 131 | if (ab == NULL) |
| 129 | goto errout; | 132 | goto errout; |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4de..8a6c6ea466d8 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
| @@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
| 1189 | struct netlbl_unlhsh_walk_arg cb_arg; | 1189 | struct netlbl_unlhsh_walk_arg cb_arg; |
| 1190 | u32 skip_bkt = cb->args[0]; | 1190 | u32 skip_bkt = cb->args[0]; |
| 1191 | u32 skip_chain = cb->args[1]; | 1191 | u32 skip_chain = cb->args[1]; |
| 1192 | u32 skip_addr4 = cb->args[2]; | ||
| 1193 | u32 skip_addr6 = cb->args[3]; | ||
| 1194 | u32 iter_bkt; | 1192 | u32 iter_bkt; |
| 1195 | u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; | 1193 | u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; |
| 1196 | struct netlbl_unlhsh_iface *iface; | 1194 | struct netlbl_unlhsh_iface *iface; |
| @@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
| 1215 | continue; | 1213 | continue; |
| 1216 | netlbl_af4list_foreach_rcu(addr4, | 1214 | netlbl_af4list_foreach_rcu(addr4, |
| 1217 | &iface->addr4_list) { | 1215 | &iface->addr4_list) { |
| 1218 | if (iter_addr4++ < skip_addr4) | 1216 | if (iter_addr4++ < cb->args[2]) |
| 1219 | continue; | 1217 | continue; |
| 1220 | if (netlbl_unlabel_staticlist_gen( | 1218 | if (netlbl_unlabel_staticlist_gen( |
| 1221 | NLBL_UNLABEL_C_STATICLIST, | 1219 | NLBL_UNLABEL_C_STATICLIST, |
| @@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
| 1231 | #if IS_ENABLED(CONFIG_IPV6) | 1229 | #if IS_ENABLED(CONFIG_IPV6) |
| 1232 | netlbl_af6list_foreach_rcu(addr6, | 1230 | netlbl_af6list_foreach_rcu(addr6, |
| 1233 | &iface->addr6_list) { | 1231 | &iface->addr6_list) { |
| 1234 | if (iter_addr6++ < skip_addr6) | 1232 | if (iter_addr6++ < cb->args[3]) |
| 1235 | continue; | 1233 | continue; |
| 1236 | if (netlbl_unlabel_staticlist_gen( | 1234 | if (netlbl_unlabel_staticlist_gen( |
| 1237 | NLBL_UNLABEL_C_STATICLIST, | 1235 | NLBL_UNLABEL_C_STATICLIST, |
| @@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, | |||
| 1250 | 1248 | ||
| 1251 | unlabel_staticlist_return: | 1249 | unlabel_staticlist_return: |
| 1252 | rcu_read_unlock(); | 1250 | rcu_read_unlock(); |
| 1253 | cb->args[0] = skip_bkt; | 1251 | cb->args[0] = iter_bkt; |
| 1254 | cb->args[1] = skip_chain; | 1252 | cb->args[1] = iter_chain; |
| 1255 | cb->args[2] = skip_addr4; | 1253 | cb->args[2] = iter_addr4; |
| 1256 | cb->args[3] = skip_addr6; | 1254 | cb->args[3] = iter_addr6; |
| 1257 | return skb->len; | 1255 | return skb->len; |
| 1258 | } | 1256 | } |
| 1259 | 1257 | ||
| @@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
| 1273 | { | 1271 | { |
| 1274 | struct netlbl_unlhsh_walk_arg cb_arg; | 1272 | struct netlbl_unlhsh_walk_arg cb_arg; |
| 1275 | struct netlbl_unlhsh_iface *iface; | 1273 | struct netlbl_unlhsh_iface *iface; |
| 1276 | u32 skip_addr4 = cb->args[0]; | 1274 | u32 iter_addr4 = 0, iter_addr6 = 0; |
| 1277 | u32 skip_addr6 = cb->args[1]; | ||
| 1278 | u32 iter_addr4 = 0; | ||
| 1279 | struct netlbl_af4list *addr4; | 1275 | struct netlbl_af4list *addr4; |
| 1280 | #if IS_ENABLED(CONFIG_IPV6) | 1276 | #if IS_ENABLED(CONFIG_IPV6) |
| 1281 | u32 iter_addr6 = 0; | ||
| 1282 | struct netlbl_af6list *addr6; | 1277 | struct netlbl_af6list *addr6; |
| 1283 | #endif | 1278 | #endif |
| 1284 | 1279 | ||
| @@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
| 1292 | goto unlabel_staticlistdef_return; | 1287 | goto unlabel_staticlistdef_return; |
| 1293 | 1288 | ||
| 1294 | netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { | 1289 | netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { |
| 1295 | if (iter_addr4++ < skip_addr4) | 1290 | if (iter_addr4++ < cb->args[0]) |
| 1296 | continue; | 1291 | continue; |
| 1297 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, | 1292 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, |
| 1298 | iface, | 1293 | iface, |
| @@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
| 1305 | } | 1300 | } |
| 1306 | #if IS_ENABLED(CONFIG_IPV6) | 1301 | #if IS_ENABLED(CONFIG_IPV6) |
| 1307 | netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { | 1302 | netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { |
| 1308 | if (iter_addr6++ < skip_addr6) | 1303 | if (iter_addr6++ < cb->args[1]) |
| 1309 | continue; | 1304 | continue; |
| 1310 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, | 1305 | if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, |
| 1311 | iface, | 1306 | iface, |
| @@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb, | |||
| 1320 | 1315 | ||
| 1321 | unlabel_staticlistdef_return: | 1316 | unlabel_staticlistdef_return: |
| 1322 | rcu_read_unlock(); | 1317 | rcu_read_unlock(); |
| 1323 | cb->args[0] = skip_addr4; | 1318 | cb->args[0] = iter_addr4; |
| 1324 | cb->args[1] = skip_addr6; | 1319 | cb->args[1] = iter_addr6; |
| 1325 | return skb->len; | 1320 | return skb->len; |
| 1326 | } | 1321 | } |
| 1327 | 1322 | ||
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f2aabb6f4105..5a55be3f17a5 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
| @@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family, | |||
| 142 | int err = 0; | 142 | int err = 0; |
| 143 | 143 | ||
| 144 | BUG_ON(grp->name[0] == '\0'); | 144 | BUG_ON(grp->name[0] == '\0'); |
| 145 | BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); | ||
| 145 | 146 | ||
| 146 | genl_lock(); | 147 | genl_lock(); |
| 147 | 148 | ||
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d1fa1d9ffd2e..103bd704b5fc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
| @@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1173 | } | 1173 | } |
| 1174 | 1174 | ||
| 1175 | if (sax != NULL) { | 1175 | if (sax != NULL) { |
| 1176 | memset(sax, 0, sizeof(*sax)); | ||
| 1176 | sax->sax25_family = AF_NETROM; | 1177 | sax->sax25_family = AF_NETROM; |
| 1177 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, | 1178 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, |
| 1178 | AX25_ADDR_LEN); | 1179 | AX25_ADDR_LEN); |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 7f8266dd14cb..ee25f25f0cd6 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
| @@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) | |||
| 68 | } | 68 | } |
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, |
| 72 | int err) | ||
| 72 | { | 73 | { |
| 73 | struct sock *sk; | 74 | struct sock *sk; |
| 74 | struct hlist_node *tmp; | 75 | struct hlist_node *tmp; |
| @@ -100,11 +101,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
| 100 | 101 | ||
| 101 | nfc_llcp_accept_unlink(accept_sk); | 102 | nfc_llcp_accept_unlink(accept_sk); |
| 102 | 103 | ||
| 104 | if (err) | ||
| 105 | accept_sk->sk_err = err; | ||
| 103 | accept_sk->sk_state = LLCP_CLOSED; | 106 | accept_sk->sk_state = LLCP_CLOSED; |
| 107 | accept_sk->sk_state_change(sk); | ||
| 104 | 108 | ||
| 105 | bh_unlock_sock(accept_sk); | 109 | bh_unlock_sock(accept_sk); |
| 106 | |||
| 107 | sock_orphan(accept_sk); | ||
| 108 | } | 110 | } |
| 109 | 111 | ||
| 110 | if (listen == true) { | 112 | if (listen == true) { |
| @@ -123,16 +125,45 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
| 123 | continue; | 125 | continue; |
| 124 | } | 126 | } |
| 125 | 127 | ||
| 128 | if (err) | ||
| 129 | sk->sk_err = err; | ||
| 126 | sk->sk_state = LLCP_CLOSED; | 130 | sk->sk_state = LLCP_CLOSED; |
| 131 | sk->sk_state_change(sk); | ||
| 127 | 132 | ||
| 128 | bh_unlock_sock(sk); | 133 | bh_unlock_sock(sk); |
| 129 | 134 | ||
| 130 | sock_orphan(sk); | ||
| 131 | |||
| 132 | sk_del_node_init(sk); | 135 | sk_del_node_init(sk); |
| 133 | } | 136 | } |
| 134 | 137 | ||
| 135 | write_unlock(&local->sockets.lock); | 138 | write_unlock(&local->sockets.lock); |
| 139 | |||
| 140 | /* | ||
| 141 | * If we want to keep the listening sockets alive, | ||
| 142 | * we don't touch the RAW ones. | ||
| 143 | */ | ||
| 144 | if (listen == true) | ||
| 145 | return; | ||
| 146 | |||
| 147 | write_lock(&local->raw_sockets.lock); | ||
| 148 | |||
| 149 | sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { | ||
| 150 | llcp_sock = nfc_llcp_sock(sk); | ||
| 151 | |||
| 152 | bh_lock_sock(sk); | ||
| 153 | |||
| 154 | nfc_llcp_socket_purge(llcp_sock); | ||
| 155 | |||
| 156 | if (err) | ||
| 157 | sk->sk_err = err; | ||
| 158 | sk->sk_state = LLCP_CLOSED; | ||
| 159 | sk->sk_state_change(sk); | ||
| 160 | |||
| 161 | bh_unlock_sock(sk); | ||
| 162 | |||
| 163 | sk_del_node_init(sk); | ||
| 164 | } | ||
| 165 | |||
| 166 | write_unlock(&local->raw_sockets.lock); | ||
| 136 | } | 167 | } |
| 137 | 168 | ||
| 138 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | 169 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) |
| @@ -142,20 +173,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | |||
| 142 | return local; | 173 | return local; |
| 143 | } | 174 | } |
| 144 | 175 | ||
| 145 | static void local_release(struct kref *ref) | 176 | static void local_cleanup(struct nfc_llcp_local *local, bool listen) |
| 146 | { | 177 | { |
| 147 | struct nfc_llcp_local *local; | 178 | nfc_llcp_socket_release(local, listen, ENXIO); |
| 148 | |||
| 149 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
| 150 | |||
| 151 | list_del(&local->list); | ||
| 152 | nfc_llcp_socket_release(local, false); | ||
| 153 | del_timer_sync(&local->link_timer); | 179 | del_timer_sync(&local->link_timer); |
| 154 | skb_queue_purge(&local->tx_queue); | 180 | skb_queue_purge(&local->tx_queue); |
| 155 | cancel_work_sync(&local->tx_work); | 181 | cancel_work_sync(&local->tx_work); |
| 156 | cancel_work_sync(&local->rx_work); | 182 | cancel_work_sync(&local->rx_work); |
| 157 | cancel_work_sync(&local->timeout_work); | 183 | cancel_work_sync(&local->timeout_work); |
| 158 | kfree_skb(local->rx_pending); | 184 | kfree_skb(local->rx_pending); |
| 185 | } | ||
| 186 | |||
| 187 | static void local_release(struct kref *ref) | ||
| 188 | { | ||
| 189 | struct nfc_llcp_local *local; | ||
| 190 | |||
| 191 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
| 192 | |||
| 193 | list_del(&local->list); | ||
| 194 | local_cleanup(local, false); | ||
| 159 | kfree(local); | 195 | kfree(local); |
| 160 | } | 196 | } |
| 161 | 197 | ||
| @@ -785,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, | |||
| 785 | skb_get(skb); | 821 | skb_get(skb); |
| 786 | } else { | 822 | } else { |
| 787 | pr_err("Receive queue is full\n"); | 823 | pr_err("Receive queue is full\n"); |
| 788 | kfree_skb(skb); | ||
| 789 | } | 824 | } |
| 790 | 825 | ||
| 791 | nfc_llcp_sock_put(llcp_sock); | 826 | nfc_llcp_sock_put(llcp_sock); |
| @@ -986,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
| 986 | skb_get(skb); | 1021 | skb_get(skb); |
| 987 | } else { | 1022 | } else { |
| 988 | pr_err("Receive queue is full\n"); | 1023 | pr_err("Receive queue is full\n"); |
| 989 | kfree_skb(skb); | ||
| 990 | } | 1024 | } |
| 991 | } | 1025 | } |
| 992 | 1026 | ||
| @@ -1348,7 +1382,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) | |||
| 1348 | return; | 1382 | return; |
| 1349 | 1383 | ||
| 1350 | /* Close and purge all existing sockets */ | 1384 | /* Close and purge all existing sockets */ |
| 1351 | nfc_llcp_socket_release(local, true); | 1385 | nfc_llcp_socket_release(local, true, 0); |
| 1352 | } | 1386 | } |
| 1353 | 1387 | ||
| 1354 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, | 1388 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, |
| @@ -1427,6 +1461,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev) | |||
| 1427 | return; | 1461 | return; |
| 1428 | } | 1462 | } |
| 1429 | 1463 | ||
| 1464 | local_cleanup(local, false); | ||
| 1465 | |||
| 1430 | nfc_llcp_local_put(local); | 1466 | nfc_llcp_local_put(local); |
| 1431 | } | 1467 | } |
| 1432 | 1468 | ||
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5332751943a9..6c94447ec414 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
| @@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { | 272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { |
| 273 | nfc_llcp_accept_unlink(sk); | 273 | list_del_init(&lsk->accept_queue); |
| 274 | sock_put(sk); | ||
| 275 | |||
| 274 | if (newsock) | 276 | if (newsock) |
| 275 | sock_graft(sk, newsock); | 277 | sock_graft(sk, newsock); |
| 276 | 278 | ||
| @@ -278,6 +280,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
| 278 | 280 | ||
| 279 | pr_debug("Returning sk state %d\n", sk->sk_state); | 281 | pr_debug("Returning sk state %d\n", sk->sk_state); |
| 280 | 282 | ||
| 283 | sk_acceptq_removed(parent); | ||
| 284 | |||
| 281 | return sk; | 285 | return sk; |
| 282 | } | 286 | } |
| 283 | 287 | ||
| @@ -462,8 +466,6 @@ static int llcp_sock_release(struct socket *sock) | |||
| 462 | nfc_llcp_accept_unlink(accept_sk); | 466 | nfc_llcp_accept_unlink(accept_sk); |
| 463 | 467 | ||
| 464 | release_sock(accept_sk); | 468 | release_sock(accept_sk); |
| 465 | |||
| 466 | sock_orphan(accept_sk); | ||
| 467 | } | 469 | } |
| 468 | } | 470 | } |
| 469 | 471 | ||
| @@ -644,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 644 | 646 | ||
| 645 | pr_debug("%p %zu\n", sk, len); | 647 | pr_debug("%p %zu\n", sk, len); |
| 646 | 648 | ||
| 649 | msg->msg_namelen = 0; | ||
| 650 | |||
| 647 | lock_sock(sk); | 651 | lock_sock(sk); |
| 648 | 652 | ||
| 649 | if (sk->sk_state == LLCP_CLOSED && | 653 | if (sk->sk_state == LLCP_CLOSED && |
| @@ -689,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 689 | 693 | ||
| 690 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); | 694 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); |
| 691 | 695 | ||
| 696 | memset(sockaddr, 0, sizeof(*sockaddr)); | ||
| 692 | sockaddr->sa_family = AF_NFC; | 697 | sockaddr->sa_family = AF_NFC; |
| 693 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; | 698 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; |
| 694 | sockaddr->dsap = ui_cb->dsap; | 699 | sockaddr->dsap = ui_cb->dsap; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index ac2defeeba83..d4d5363c7ba7 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
| @@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
| 58 | 58 | ||
| 59 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 59 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
| 60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data | 60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data |
| 61 | + ETH_HLEN, VLAN_HLEN, 0)); | 61 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
| 62 | 62 | ||
| 63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); | 63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
| 64 | *current_tci = vhdr->h_vlan_TCI; | 64 | *current_tci = vhdr->h_vlan_TCI; |
| @@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
| 115 | 115 | ||
| 116 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 116 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
| 117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data | 117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data |
| 118 | + ETH_HLEN, VLAN_HLEN, 0)); | 118 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
| 119 | 119 | ||
| 120 | } | 120 | } |
| 121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); | 121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e87a26506dba..6980c3e6f066 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
| 394 | 394 | ||
| 395 | skb_copy_and_csum_dev(skb, nla_data(nla)); | 395 | skb_copy_and_csum_dev(skb, nla_data(nla)); |
| 396 | 396 | ||
| 397 | genlmsg_end(user_skb, upcall); | ||
| 397 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); | 398 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); |
| 398 | 399 | ||
| 399 | out: | 400 | out: |
| @@ -1592,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, | |||
| 1592 | return ERR_PTR(-ENOMEM); | 1593 | return ERR_PTR(-ENOMEM); |
| 1593 | 1594 | ||
| 1594 | retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); | 1595 | retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); |
| 1595 | if (retval < 0) { | 1596 | BUG_ON(retval < 0); |
| 1596 | kfree_skb(skb); | 1597 | |
| 1597 | return ERR_PTR(retval); | ||
| 1598 | } | ||
| 1599 | return skb; | 1598 | return skb; |
| 1600 | } | 1599 | } |
| 1601 | 1600 | ||
| @@ -1690,6 +1689,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
| 1690 | if (IS_ERR(vport)) | 1689 | if (IS_ERR(vport)) |
| 1691 | goto exit_unlock; | 1690 | goto exit_unlock; |
| 1692 | 1691 | ||
| 1692 | err = 0; | ||
| 1693 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, | 1693 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
| 1694 | OVS_VPORT_CMD_NEW); | 1694 | OVS_VPORT_CMD_NEW); |
| 1695 | if (IS_ERR(reply)) { | 1695 | if (IS_ERR(reply)) { |
| @@ -1724,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1724 | nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) | 1724 | nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) |
| 1725 | err = -EINVAL; | 1725 | err = -EINVAL; |
| 1726 | 1726 | ||
| 1727 | reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
| 1728 | if (!reply) { | ||
| 1729 | err = -ENOMEM; | ||
| 1730 | goto exit_unlock; | ||
| 1731 | } | ||
| 1732 | |||
| 1727 | if (!err && a[OVS_VPORT_ATTR_OPTIONS]) | 1733 | if (!err && a[OVS_VPORT_ATTR_OPTIONS]) |
| 1728 | err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); | 1734 | err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); |
| 1729 | if (err) | 1735 | if (err) |
| 1730 | goto exit_unlock; | 1736 | goto exit_free; |
| 1737 | |||
| 1731 | if (a[OVS_VPORT_ATTR_UPCALL_PID]) | 1738 | if (a[OVS_VPORT_ATTR_UPCALL_PID]) |
| 1732 | vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); | 1739 | vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); |
| 1733 | 1740 | ||
| 1734 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, | 1741 | err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, |
| 1735 | OVS_VPORT_CMD_NEW); | 1742 | info->snd_seq, 0, OVS_VPORT_CMD_NEW); |
| 1736 | if (IS_ERR(reply)) { | 1743 | BUG_ON(err < 0); |
| 1737 | netlink_set_err(sock_net(skb->sk)->genl_sock, 0, | ||
| 1738 | ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); | ||
| 1739 | goto exit_unlock; | ||
| 1740 | } | ||
| 1741 | 1744 | ||
| 1742 | genl_notify(reply, genl_info_net(info), info->snd_portid, | 1745 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
| 1743 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); | 1746 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); |
| 1744 | 1747 | ||
| 1748 | rtnl_unlock(); | ||
| 1749 | return 0; | ||
| 1750 | |||
| 1751 | exit_free: | ||
| 1752 | kfree_skb(reply); | ||
| 1745 | exit_unlock: | 1753 | exit_unlock: |
| 1746 | rtnl_unlock(); | 1754 | rtnl_unlock(); |
| 1747 | return err; | 1755 | return err; |
| @@ -1771,6 +1779,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
| 1771 | if (IS_ERR(reply)) | 1779 | if (IS_ERR(reply)) |
| 1772 | goto exit_unlock; | 1780 | goto exit_unlock; |
| 1773 | 1781 | ||
| 1782 | err = 0; | ||
| 1774 | ovs_dp_detach_port(vport); | 1783 | ovs_dp_detach_port(vport); |
| 1775 | 1784 | ||
| 1776 | genl_notify(reply, genl_info_net(info), info->snd_portid, | 1785 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 20605ecf100b..67a2b783fe70 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) | |||
| 482 | return htons(ETH_P_802_2); | 482 | return htons(ETH_P_802_2); |
| 483 | 483 | ||
| 484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); | 484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); |
| 485 | return llc->ethertype; | 485 | |
| 486 | if (ntohs(llc->ethertype) >= 1536) | ||
| 487 | return llc->ethertype; | ||
| 488 | |||
| 489 | return htons(ETH_P_802_2); | ||
| 486 | } | 490 | } |
| 487 | 491 | ||
| 488 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | 492 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, |
| @@ -791,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) | |||
| 791 | 795 | ||
| 792 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) | 796 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) |
| 793 | { | 797 | { |
| 798 | BUG_ON(table->count == 0); | ||
| 794 | hlist_del_rcu(&flow->hash_node[table->node_ver]); | 799 | hlist_del_rcu(&flow->hash_node[table->node_ver]); |
| 795 | table->count--; | 800 | table->count--; |
| 796 | BUG_ON(table->count < 0); | ||
| 797 | } | 801 | } |
| 798 | 802 | ||
| 799 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | 803 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 670cbc3518de..2130d61c384a 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
| @@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | |||
| 43 | 43 | ||
| 44 | /* Make our own copy of the packet. Otherwise we will mangle the | 44 | /* Make our own copy of the packet. Otherwise we will mangle the |
| 45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). | 45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). |
| 46 | * (No one comes after us, since we tell handle_bridge() that we took | 46 | */ |
| 47 | * the packet.) */ | ||
| 48 | skb = skb_share_check(skb, GFP_ATOMIC); | 47 | skb = skb_share_check(skb, GFP_ATOMIC); |
| 49 | if (unlikely(!skb)) | 48 | if (unlikely(!skb)) |
| 50 | return; | 49 | return; |
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ba717cc038b3..f6b8132ce4cb 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
| @@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) | |||
| 325 | * @skb: skb that was received | 325 | * @skb: skb that was received |
| 326 | * | 326 | * |
| 327 | * Must be called with rcu_read_lock. The packet cannot be shared and | 327 | * Must be called with rcu_read_lock. The packet cannot be shared and |
| 328 | * skb->data should point to the Ethernet header. The caller must have already | 328 | * skb->data should point to the Ethernet header. |
| 329 | * called compute_ip_summed() to initialize the checksumming fields. | ||
| 330 | */ | 329 | */ |
| 331 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) | 330 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) |
| 332 | { | 331 | { |
diff --git a/net/rds/message.c b/net/rds/message.c index f0a4658f3273..aba232f9f308 100644 --- a/net/rds/message.c +++ b/net/rds/message.c | |||
| @@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm) | |||
| 82 | void rds_message_put(struct rds_message *rm) | 82 | void rds_message_put(struct rds_message *rm) |
| 83 | { | 83 | { |
| 84 | rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); | 84 | rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); |
| 85 | if (atomic_read(&rm->m_refcount) == 0) { | 85 | WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm); |
| 86 | printk(KERN_CRIT "danger refcount zero on %p\n", rm); | ||
| 87 | WARN_ON(1); | ||
| 88 | } | ||
| 89 | if (atomic_dec_and_test(&rm->m_refcount)) { | 86 | if (atomic_dec_and_test(&rm->m_refcount)) { |
| 90 | BUG_ON(!list_empty(&rm->m_sock_item)); | 87 | BUG_ON(!list_empty(&rm->m_sock_item)); |
| 91 | BUG_ON(!list_empty(&rm->m_conn_item)); | 88 | BUG_ON(!list_empty(&rm->m_conn_item)); |
| @@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp) | |||
| 197 | { | 194 | { |
| 198 | struct rds_message *rm; | 195 | struct rds_message *rm; |
| 199 | 196 | ||
| 197 | if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message)) | ||
| 198 | return NULL; | ||
| 199 | |||
| 200 | rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp); | 200 | rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp); |
| 201 | if (!rm) | 201 | if (!rm) |
| 202 | goto out; | 202 | goto out; |
diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b90..73be187d389e 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c | |||
| @@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, | |||
| 87 | for (i = 0; i < nr; i++) { | 87 | for (i = 0; i < nr; i++) { |
| 88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); | 88 | BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); |
| 89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); | 89 | strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); |
| 90 | ctr.name[sizeof(ctr.name) - 1] = '\0'; | ||
| 90 | ctr.value = values[i]; | 91 | ctr.value = values[i]; |
| 91 | 92 | ||
| 92 | rds_info_copy(iter, &ctr, sizeof(ctr)); | 93 | rds_info_copy(iter, &ctr, sizeof(ctr)); |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054a..9c8347451597 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
| @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
| 1254 | 1254 | ||
| 1255 | if (srose != NULL) { | 1255 | if (srose != NULL) { |
| 1256 | memset(srose, 0, msg->msg_namelen); | ||
| 1256 | srose->srose_family = AF_ROSE; | 1257 | srose->srose_family = AF_ROSE; |
| 1257 | srose->srose_addr = rose->dest_addr; | 1258 | srose->srose_addr = rose->dest_addr; |
| 1258 | srose->srose_call = rose->dest_call; | 1259 | srose->srose_call = rose->dest_call; |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 1135d8227f9b..9b97172db84a 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
| @@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, | |||
| 204 | if (err < 0) | 204 | if (err < 0) |
| 205 | return err; | 205 | return err; |
| 206 | 206 | ||
| 207 | err = -EINVAL; | ||
| 208 | if (tb[TCA_FW_CLASSID]) { | 207 | if (tb[TCA_FW_CLASSID]) { |
| 209 | f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); | 208 | f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); |
| 210 | tcf_bind_filter(tp, &f->res, base); | 209 | tcf_bind_filter(tp, &f->res, base); |
| @@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, | |||
| 218 | } | 217 | } |
| 219 | #endif /* CONFIG_NET_CLS_IND */ | 218 | #endif /* CONFIG_NET_CLS_IND */ |
| 220 | 219 | ||
| 220 | err = -EINVAL; | ||
| 221 | if (tb[TCA_FW_MASK]) { | 221 | if (tb[TCA_FW_MASK]) { |
| 222 | mask = nla_get_u32(tb[TCA_FW_MASK]); | 222 | mask = nla_get_u32(tb[TCA_FW_MASK]); |
| 223 | if (mask != head->mask) | 223 | if (mask != head->mask) |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ffb..1bc210ffcba2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
| @@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) | |||
| 962 | cbq_update(q); | 962 | cbq_update(q); |
| 963 | if ((incr -= incr2) < 0) | 963 | if ((incr -= incr2) < 0) |
| 964 | incr = 0; | 964 | incr = 0; |
| 965 | q->now += incr; | ||
| 966 | } else { | ||
| 967 | if (now > q->now) | ||
| 968 | q->now = now; | ||
| 965 | } | 969 | } |
| 966 | q->now += incr; | ||
| 967 | q->now_rt = now; | 970 | q->now_rt = now; |
| 968 | 971 | ||
| 969 | for (;;) { | 972 | for (;;) { |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb2534..55786283a3df 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
| @@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 195 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
| 196 | flow->dropped = 0; | 196 | flow->dropped = 0; |
| 197 | } | 197 | } |
| 198 | if (++sch->q.qlen < sch->limit) | 198 | if (++sch->q.qlen <= sch->limit) |
| 199 | return NET_XMIT_SUCCESS; | 199 | return NET_XMIT_SUCCESS; |
| 200 | 200 | ||
| 201 | q->drop_overlimit++; | 201 | q->drop_overlimit++; |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a22..eac7e0ee23c1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
| @@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) | |||
| 904 | u64 mult; | 904 | u64 mult; |
| 905 | int shift; | 905 | int shift; |
| 906 | 906 | ||
| 907 | r->rate_bps = rate << 3; | 907 | r->rate_bps = (u64)rate << 3; |
| 908 | r->shift = 0; | 908 | r->shift = 0; |
| 909 | r->mult = 1; | 909 | r->mult = 1; |
| 910 | /* | 910 | /* |
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3d..d51852bba01c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | |||
| 298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ | 298 | new_num_classes == q->max_agg_classes - 1) /* agg no more full */ |
| 299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); | 299 | hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); |
| 300 | 300 | ||
| 301 | /* The next assignment may let | ||
| 302 | * agg->initial_budget > agg->budgetmax | ||
| 303 | * hold, we will take it into account in charge_actual_service(). | ||
| 304 | */ | ||
| 301 | agg->budgetmax = new_num_classes * agg->lmax; | 305 | agg->budgetmax = new_num_classes * agg->lmax; |
| 302 | new_agg_weight = agg->class_weight * new_num_classes; | 306 | new_agg_weight = agg->class_weight * new_num_classes; |
| 303 | agg->inv_w = ONE_FP/new_agg_weight; | 307 | agg->inv_w = ONE_FP/new_agg_weight; |
| @@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q) | |||
| 817 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; | 821 | unsigned long old_vslot = q->oldV >> q->min_slot_shift; |
| 818 | 822 | ||
| 819 | if (vslot != old_vslot) { | 823 | if (vslot != old_vslot) { |
| 820 | unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; | 824 | unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1; |
| 821 | qfq_move_groups(q, mask, IR, ER); | 825 | qfq_move_groups(q, mask, IR, ER); |
| 822 | qfq_move_groups(q, mask, IB, EB); | 826 | qfq_move_groups(q, mask, IB, EB); |
| 823 | } | 827 | } |
| @@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg, | |||
| 988 | /* Update F according to the actual service received by the aggregate. */ | 992 | /* Update F according to the actual service received by the aggregate. */ |
| 989 | static inline void charge_actual_service(struct qfq_aggregate *agg) | 993 | static inline void charge_actual_service(struct qfq_aggregate *agg) |
| 990 | { | 994 | { |
| 991 | /* compute the service received by the aggregate */ | 995 | /* Compute the service received by the aggregate, taking into |
| 992 | u32 service_received = agg->initial_budget - agg->budget; | 996 | * account that, after decreasing the number of classes in |
| 997 | * agg, it may happen that | ||
| 998 | * agg->initial_budget - agg->budget > agg->bugdetmax | ||
| 999 | */ | ||
| 1000 | u32 service_received = min(agg->budgetmax, | ||
| 1001 | agg->initial_budget - agg->budget); | ||
| 993 | 1002 | ||
| 994 | agg->F = agg->S + (u64)service_received * agg->inv_w; | 1003 | agg->F = agg->S + (u64)service_received * agg->inv_w; |
| 995 | } | 1004 | } |
| 996 | 1005 | ||
| 1006 | static inline void qfq_update_agg_ts(struct qfq_sched *q, | ||
| 1007 | struct qfq_aggregate *agg, | ||
| 1008 | enum update_reason reason); | ||
| 1009 | |||
| 1010 | static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); | ||
| 1011 | |||
| 997 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | 1012 | static struct sk_buff *qfq_dequeue(struct Qdisc *sch) |
| 998 | { | 1013 | { |
| 999 | struct qfq_sched *q = qdisc_priv(sch); | 1014 | struct qfq_sched *q = qdisc_priv(sch); |
| @@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
| 1021 | in_serv_agg->initial_budget = in_serv_agg->budget = | 1036 | in_serv_agg->initial_budget = in_serv_agg->budget = |
| 1022 | in_serv_agg->budgetmax; | 1037 | in_serv_agg->budgetmax; |
| 1023 | 1038 | ||
| 1024 | if (!list_empty(&in_serv_agg->active)) | 1039 | if (!list_empty(&in_serv_agg->active)) { |
| 1025 | /* | 1040 | /* |
| 1026 | * Still active: reschedule for | 1041 | * Still active: reschedule for |
| 1027 | * service. Possible optimization: if no other | 1042 | * service. Possible optimization: if no other |
| @@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
| 1032 | * handle it, we would need to maintain an | 1047 | * handle it, we would need to maintain an |
| 1033 | * extra num_active_aggs field. | 1048 | * extra num_active_aggs field. |
| 1034 | */ | 1049 | */ |
| 1035 | qfq_activate_agg(q, in_serv_agg, requeue); | 1050 | qfq_update_agg_ts(q, in_serv_agg, requeue); |
| 1036 | else if (sch->q.qlen == 0) { /* no aggregate to serve */ | 1051 | qfq_schedule_agg(q, in_serv_agg); |
| 1052 | } else if (sch->q.qlen == 0) { /* no aggregate to serve */ | ||
| 1037 | q->in_serv_agg = NULL; | 1053 | q->in_serv_agg = NULL; |
| 1038 | return NULL; | 1054 | return NULL; |
| 1039 | } | 1055 | } |
| @@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch) | |||
| 1052 | qdisc_bstats_update(sch, skb); | 1068 | qdisc_bstats_update(sch, skb); |
| 1053 | 1069 | ||
| 1054 | agg_dequeue(in_serv_agg, cl, len); | 1070 | agg_dequeue(in_serv_agg, cl, len); |
| 1055 | in_serv_agg->budget -= len; | 1071 | /* If lmax is lowered, through qfq_change_class, for a class |
| 1072 | * owning pending packets with larger size than the new value | ||
| 1073 | * of lmax, then the following condition may hold. | ||
| 1074 | */ | ||
| 1075 | if (unlikely(in_serv_agg->budget < len)) | ||
| 1076 | in_serv_agg->budget = 0; | ||
| 1077 | else | ||
| 1078 | in_serv_agg->budget -= len; | ||
| 1079 | |||
| 1056 | q->V += (u64)len * IWSUM; | 1080 | q->V += (u64)len * IWSUM; |
| 1057 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", | 1081 | pr_debug("qfq dequeue: len %u F %lld now %lld\n", |
| 1058 | len, (unsigned long long) in_serv_agg->F, | 1082 | len, (unsigned long long) in_serv_agg->F, |
| @@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1217 | cl->deficit = agg->lmax; | 1241 | cl->deficit = agg->lmax; |
| 1218 | list_add_tail(&cl->alist, &agg->active); | 1242 | list_add_tail(&cl->alist, &agg->active); |
| 1219 | 1243 | ||
| 1220 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) | 1244 | if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || |
| 1221 | return err; /* aggregate was not empty, nothing else to do */ | 1245 | q->in_serv_agg == agg) |
| 1246 | return err; /* non-empty or in service, nothing else to do */ | ||
| 1222 | 1247 | ||
| 1223 | /* recharge budget */ | 1248 | qfq_activate_agg(q, agg, enqueue); |
| 1224 | agg->initial_budget = agg->budget = agg->budgetmax; | ||
| 1225 | |||
| 1226 | qfq_update_agg_ts(q, agg, enqueue); | ||
| 1227 | if (q->in_serv_agg == NULL) | ||
| 1228 | q->in_serv_agg = agg; | ||
| 1229 | else if (agg != q->in_serv_agg) | ||
| 1230 | qfq_schedule_agg(q, agg); | ||
| 1231 | 1249 | ||
| 1232 | return err; | 1250 | return err; |
| 1233 | } | 1251 | } |
| @@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
| 1261 | /* group was surely ineligible, remove */ | 1279 | /* group was surely ineligible, remove */ |
| 1262 | __clear_bit(grp->index, &q->bitmaps[IR]); | 1280 | __clear_bit(grp->index, &q->bitmaps[IR]); |
| 1263 | __clear_bit(grp->index, &q->bitmaps[IB]); | 1281 | __clear_bit(grp->index, &q->bitmaps[IB]); |
| 1264 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) | 1282 | } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && |
| 1283 | q->in_serv_agg == NULL) | ||
| 1265 | q->V = roundedS; | 1284 | q->V = roundedS; |
| 1266 | 1285 | ||
| 1267 | grp->S = roundedS; | 1286 | grp->S = roundedS; |
| @@ -1284,8 +1303,15 @@ skip_update: | |||
| 1284 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, | 1303 | static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, |
| 1285 | enum update_reason reason) | 1304 | enum update_reason reason) |
| 1286 | { | 1305 | { |
| 1306 | agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ | ||
| 1307 | |||
| 1287 | qfq_update_agg_ts(q, agg, reason); | 1308 | qfq_update_agg_ts(q, agg, reason); |
| 1288 | qfq_schedule_agg(q, agg); | 1309 | if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */ |
| 1310 | q->in_serv_agg = agg; /* start serving this aggregate */ | ||
| 1311 | /* update V: to be in service, agg must be eligible */ | ||
| 1312 | q->oldV = q->V = agg->S; | ||
| 1313 | } else if (agg != q->in_serv_agg) | ||
| 1314 | qfq_schedule_agg(q, agg); | ||
| 1289 | } | 1315 | } |
| 1290 | 1316 | ||
| 1291 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, | 1317 | static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, |
| @@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) | |||
| 1357 | __set_bit(grp->index, &q->bitmaps[s]); | 1383 | __set_bit(grp->index, &q->bitmaps[s]); |
| 1358 | } | 1384 | } |
| 1359 | } | 1385 | } |
| 1360 | |||
| 1361 | qfq_update_eligible(q); | ||
| 1362 | } | 1386 | } |
| 1363 | 1387 | ||
| 1364 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1388 | static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 43cd0dd9149d..d2709e2b7be6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | |||
| 1079 | transports) { | 1079 | transports) { |
| 1080 | 1080 | ||
| 1081 | if (transport == active) | 1081 | if (transport == active) |
| 1082 | break; | 1082 | continue; |
| 1083 | list_for_each_entry(chunk, &transport->transmitted, | 1083 | list_for_each_entry(chunk, &transport->transmitted, |
| 1084 | transmitted_list) { | 1084 | transmitted_list) { |
| 1085 | if (key == chunk->subh.data_hdr->tsn) { | 1085 | if (key == chunk->subh.data_hdr->tsn) { |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 2b3ef03c6098..12ed45dbe75d 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
| @@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
| 155 | 155 | ||
| 156 | /* SCTP-AUTH extensions*/ | 156 | /* SCTP-AUTH extensions*/ |
| 157 | INIT_LIST_HEAD(&ep->endpoint_shared_keys); | 157 | INIT_LIST_HEAD(&ep->endpoint_shared_keys); |
| 158 | null_key = sctp_auth_shkey_create(0, GFP_KERNEL); | 158 | null_key = sctp_auth_shkey_create(0, gfp); |
| 159 | if (!null_key) | 159 | if (!null_key) |
| 160 | goto nomem; | 160 | goto nomem; |
| 161 | 161 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5131fcfedb03..de1a0138317f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, | |||
| 2082 | } | 2082 | } |
| 2083 | 2083 | ||
| 2084 | /* Delete the tempory new association. */ | 2084 | /* Delete the tempory new association. */ |
| 2085 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | 2085 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); |
| 2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | 2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); |
| 2087 | 2087 | ||
| 2088 | /* Restore association pointer to provide SCTP command interpeter | 2088 | /* Restore association pointer to provide SCTP command interpeter |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c99458df3f3f..b9070736b8d9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, | |||
| 5653 | if (len < sizeof(sctp_assoc_t)) | 5653 | if (len < sizeof(sctp_assoc_t)) |
| 5654 | return -EINVAL; | 5654 | return -EINVAL; |
| 5655 | 5655 | ||
| 5656 | /* Allow the struct to grow and fill in as much as possible */ | ||
| 5657 | len = min_t(size_t, len, sizeof(sas)); | ||
| 5658 | |||
| 5656 | if (copy_from_user(&sas, optval, len)) | 5659 | if (copy_from_user(&sas, optval, len)) |
| 5657 | return -EFAULT; | 5660 | return -EFAULT; |
| 5658 | 5661 | ||
| @@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, | |||
| 5686 | /* Mark beginning of a new observation period */ | 5689 | /* Mark beginning of a new observation period */ |
| 5687 | asoc->stats.max_obs_rto = asoc->rto_min; | 5690 | asoc->stats.max_obs_rto = asoc->rto_min; |
| 5688 | 5691 | ||
| 5689 | /* Allow the struct to grow and fill in as much as possible */ | ||
| 5690 | len = min_t(size_t, len, sizeof(sas)); | ||
| 5691 | |||
| 5692 | if (put_user(len, optlen)) | 5692 | if (put_user(len, optlen)) |
| 5693 | return -EFAULT; | 5693 | return -EFAULT; |
| 5694 | 5694 | ||
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index 442ad4ed6315..825ea94415b3 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c | |||
| @@ -41,8 +41,6 @@ | |||
| 41 | #include <net/sctp/sctp.h> | 41 | #include <net/sctp/sctp.h> |
| 42 | #include <net/sctp/sm.h> | 42 | #include <net/sctp/sm.h> |
| 43 | 43 | ||
| 44 | #define MAX_KMALLOC_SIZE 131072 | ||
| 45 | |||
| 46 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, | 44 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, |
| 47 | __u16 out); | 45 | __u16 out); |
| 48 | 46 | ||
| @@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, | |||
| 65 | int size; | 63 | int size; |
| 66 | 64 | ||
| 67 | size = sctp_ssnmap_size(in, out); | 65 | size = sctp_ssnmap_size(in, out); |
| 68 | if (size <= MAX_KMALLOC_SIZE) | 66 | if (size <= KMALLOC_MAX_SIZE) |
| 69 | retval = kmalloc(size, gfp); | 67 | retval = kmalloc(size, gfp); |
| 70 | else | 68 | else |
| 71 | retval = (struct sctp_ssnmap *) | 69 | retval = (struct sctp_ssnmap *) |
| @@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, | |||
| 82 | return retval; | 80 | return retval; |
| 83 | 81 | ||
| 84 | fail_map: | 82 | fail_map: |
| 85 | if (size <= MAX_KMALLOC_SIZE) | 83 | if (size <= KMALLOC_MAX_SIZE) |
| 86 | kfree(retval); | 84 | kfree(retval); |
| 87 | else | 85 | else |
| 88 | free_pages((unsigned long)retval, get_order(size)); | 86 | free_pages((unsigned long)retval, get_order(size)); |
| @@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map) | |||
| 124 | int size; | 122 | int size; |
| 125 | 123 | ||
| 126 | size = sctp_ssnmap_size(map->in.len, map->out.len); | 124 | size = sctp_ssnmap_size(map->in.len, map->out.len); |
| 127 | if (size <= MAX_KMALLOC_SIZE) | 125 | if (size <= KMALLOC_MAX_SIZE) |
| 128 | kfree(map); | 126 | kfree(map); |
| 129 | else | 127 | else |
| 130 | free_pages((unsigned long)map, get_order(size)); | 128 | free_pages((unsigned long)map, get_order(size)); |
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 5f25e0c92c31..396c45174e5b 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | static void sctp_tsnmap_update(struct sctp_tsnmap *map); | 51 | static void sctp_tsnmap_update(struct sctp_tsnmap *map); |
| 52 | static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, | 52 | static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, |
| 53 | __u16 len, __u16 *start, __u16 *end); | 53 | __u16 len, __u16 *start, __u16 *end); |
| 54 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap); | 54 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size); |
| 55 | 55 | ||
| 56 | /* Initialize a block of memory as a tsnmap. */ | 56 | /* Initialize a block of memory as a tsnmap. */ |
| 57 | struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, | 57 | struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, |
| @@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn, | |||
| 124 | 124 | ||
| 125 | gap = tsn - map->base_tsn; | 125 | gap = tsn - map->base_tsn; |
| 126 | 126 | ||
| 127 | if (gap >= map->len && !sctp_tsnmap_grow(map, gap)) | 127 | if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1)) |
| 128 | return -ENOMEM; | 128 | return -ENOMEM; |
| 129 | 129 | ||
| 130 | if (!sctp_tsnmap_has_gap(map) && gap == 0) { | 130 | if (!sctp_tsnmap_has_gap(map) && gap == 0) { |
| @@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, | |||
| 360 | return ngaps; | 360 | return ngaps; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap) | 363 | static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size) |
| 364 | { | 364 | { |
| 365 | unsigned long *new; | 365 | unsigned long *new; |
| 366 | unsigned long inc; | 366 | unsigned long inc; |
| 367 | u16 len; | 367 | u16 len; |
| 368 | 368 | ||
| 369 | if (gap >= SCTP_TSN_MAP_SIZE) | 369 | if (size > SCTP_TSN_MAP_SIZE) |
| 370 | return 0; | 370 | return 0; |
| 371 | 371 | ||
| 372 | inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; | 372 | inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; |
| 373 | len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); | 373 | len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); |
| 374 | 374 | ||
| 375 | new = kzalloc(len>>3, GFP_ATOMIC); | 375 | new = kzalloc(len>>3, GFP_ATOMIC); |
| 376 | if (!new) | 376 | if (!new) |
| 377 | return 0; | 377 | return 0; |
| 378 | 378 | ||
| 379 | bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn); | 379 | bitmap_copy(new, map->tsn_map, |
| 380 | map->max_tsn_seen - map->cumulative_tsn_ack_point); | ||
| 380 | kfree(map->tsn_map); | 381 | kfree(map->tsn_map); |
| 381 | map->tsn_map = new; | 382 | map->tsn_map = new; |
| 382 | map->len = len; | 383 | map->len = len; |
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ada17464b65b..0fd5b3d2df03 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c | |||
| @@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
| 106 | { | 106 | { |
| 107 | struct sk_buff_head temp; | 107 | struct sk_buff_head temp; |
| 108 | struct sctp_ulpevent *event; | 108 | struct sctp_ulpevent *event; |
| 109 | int event_eor = 0; | ||
| 109 | 110 | ||
| 110 | /* Create an event from the incoming chunk. */ | 111 | /* Create an event from the incoming chunk. */ |
| 111 | event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); | 112 | event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); |
| @@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
| 127 | /* Send event to the ULP. 'event' is the sctp_ulpevent for | 128 | /* Send event to the ULP. 'event' is the sctp_ulpevent for |
| 128 | * very first SKB on the 'temp' list. | 129 | * very first SKB on the 'temp' list. |
| 129 | */ | 130 | */ |
| 130 | if (event) | 131 | if (event) { |
| 132 | event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; | ||
| 131 | sctp_ulpq_tail_event(ulpq, event); | 133 | sctp_ulpq_tail_event(ulpq, event); |
| 134 | } | ||
| 132 | 135 | ||
| 133 | return 0; | 136 | return event_eor; |
| 134 | } | 137 | } |
| 135 | 138 | ||
| 136 | /* Add a new event for propagation to the ULP. */ | 139 | /* Add a new event for propagation to the ULP. */ |
| @@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) | |||
| 540 | ctsn = cevent->tsn; | 543 | ctsn = cevent->tsn; |
| 541 | 544 | ||
| 542 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | 545 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { |
| 546 | case SCTP_DATA_FIRST_FRAG: | ||
| 547 | if (!first_frag) | ||
| 548 | return NULL; | ||
| 549 | goto done; | ||
| 543 | case SCTP_DATA_MIDDLE_FRAG: | 550 | case SCTP_DATA_MIDDLE_FRAG: |
| 544 | if (!first_frag) { | 551 | if (!first_frag) { |
| 545 | first_frag = pos; | 552 | first_frag = pos; |
| 546 | next_tsn = ctsn + 1; | 553 | next_tsn = ctsn + 1; |
| 547 | last_frag = pos; | 554 | last_frag = pos; |
| 548 | } else if (next_tsn == ctsn) | 555 | } else if (next_tsn == ctsn) { |
| 549 | next_tsn++; | 556 | next_tsn++; |
| 550 | else | 557 | last_frag = pos; |
| 558 | } else | ||
| 551 | goto done; | 559 | goto done; |
| 552 | break; | 560 | break; |
| 553 | case SCTP_DATA_LAST_FRAG: | 561 | case SCTP_DATA_LAST_FRAG: |
| @@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) | |||
| 651 | } else | 659 | } else |
| 652 | goto done; | 660 | goto done; |
| 653 | break; | 661 | break; |
| 662 | |||
| 663 | case SCTP_DATA_LAST_FRAG: | ||
| 664 | if (!first_frag) | ||
| 665 | return NULL; | ||
| 666 | else | ||
| 667 | goto done; | ||
| 668 | break; | ||
| 669 | |||
| 654 | default: | 670 | default: |
| 655 | return NULL; | 671 | return NULL; |
| 656 | } | 672 | } |
| @@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, | |||
| 962 | struct sk_buff_head *list, __u16 needed) | 978 | struct sk_buff_head *list, __u16 needed) |
| 963 | { | 979 | { |
| 964 | __u16 freed = 0; | 980 | __u16 freed = 0; |
| 965 | __u32 tsn; | 981 | __u32 tsn, last_tsn; |
| 966 | struct sk_buff *skb; | 982 | struct sk_buff *skb, *flist, *last; |
| 967 | struct sctp_ulpevent *event; | 983 | struct sctp_ulpevent *event; |
| 968 | struct sctp_tsnmap *tsnmap; | 984 | struct sctp_tsnmap *tsnmap; |
| 969 | 985 | ||
| 970 | tsnmap = &ulpq->asoc->peer.tsn_map; | 986 | tsnmap = &ulpq->asoc->peer.tsn_map; |
| 971 | 987 | ||
| 972 | while ((skb = __skb_dequeue_tail(list)) != NULL) { | 988 | while ((skb = skb_peek_tail(list)) != NULL) { |
| 973 | freed += skb_headlen(skb); | ||
| 974 | event = sctp_skb2event(skb); | 989 | event = sctp_skb2event(skb); |
| 975 | tsn = event->tsn; | 990 | tsn = event->tsn; |
| 976 | 991 | ||
| 992 | /* Don't renege below the Cumulative TSN ACK Point. */ | ||
| 993 | if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap))) | ||
| 994 | break; | ||
| 995 | |||
| 996 | /* Events in ordering queue may have multiple fragments | ||
| 997 | * corresponding to additional TSNs. Sum the total | ||
| 998 | * freed space; find the last TSN. | ||
| 999 | */ | ||
| 1000 | freed += skb_headlen(skb); | ||
| 1001 | flist = skb_shinfo(skb)->frag_list; | ||
| 1002 | for (last = flist; flist; flist = flist->next) { | ||
| 1003 | last = flist; | ||
| 1004 | freed += skb_headlen(last); | ||
| 1005 | } | ||
| 1006 | if (last) | ||
| 1007 | last_tsn = sctp_skb2event(last)->tsn; | ||
| 1008 | else | ||
| 1009 | last_tsn = tsn; | ||
| 1010 | |||
| 1011 | /* Unlink the event, then renege all applicable TSNs. */ | ||
| 1012 | __skb_unlink(skb, list); | ||
| 977 | sctp_ulpevent_free(event); | 1013 | sctp_ulpevent_free(event); |
| 978 | sctp_tsnmap_renege(tsnmap, tsn); | 1014 | while (TSN_lte(tsn, last_tsn)) { |
| 1015 | sctp_tsnmap_renege(tsnmap, tsn); | ||
| 1016 | tsn++; | ||
| 1017 | } | ||
| 979 | if (freed >= needed) | 1018 | if (freed >= needed) |
| 980 | return freed; | 1019 | return freed; |
| 981 | } | 1020 | } |
| @@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, | |||
| 1002 | struct sctp_ulpevent *event; | 1041 | struct sctp_ulpevent *event; |
| 1003 | struct sctp_association *asoc; | 1042 | struct sctp_association *asoc; |
| 1004 | struct sctp_sock *sp; | 1043 | struct sctp_sock *sp; |
| 1044 | __u32 ctsn; | ||
| 1045 | struct sk_buff *skb; | ||
| 1005 | 1046 | ||
| 1006 | asoc = ulpq->asoc; | 1047 | asoc = ulpq->asoc; |
| 1007 | sp = sctp_sk(asoc->base.sk); | 1048 | sp = sctp_sk(asoc->base.sk); |
| 1008 | 1049 | ||
| 1009 | /* If the association is already in Partial Delivery mode | 1050 | /* If the association is already in Partial Delivery mode |
| 1010 | * we have noting to do. | 1051 | * we have nothing to do. |
| 1011 | */ | 1052 | */ |
| 1012 | if (ulpq->pd_mode) | 1053 | if (ulpq->pd_mode) |
| 1013 | return; | 1054 | return; |
| 1014 | 1055 | ||
| 1056 | /* Data must be at or below the Cumulative TSN ACK Point to | ||
| 1057 | * start partial delivery. | ||
| 1058 | */ | ||
| 1059 | skb = skb_peek(&asoc->ulpq.reasm); | ||
| 1060 | if (skb != NULL) { | ||
| 1061 | ctsn = sctp_skb2event(skb)->tsn; | ||
| 1062 | if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map))) | ||
| 1063 | return; | ||
| 1064 | } | ||
| 1065 | |||
| 1015 | /* If the user enabled fragment interleave socket option, | 1066 | /* If the user enabled fragment interleave socket option, |
| 1016 | * multiple associations can enter partial delivery. | 1067 | * multiple associations can enter partial delivery. |
| 1017 | * Otherwise, we can only enter partial delivery if the | 1068 | * Otherwise, we can only enter partial delivery if the |
| @@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | |||
| 1054 | } | 1105 | } |
| 1055 | /* If able to free enough room, accept this chunk. */ | 1106 | /* If able to free enough room, accept this chunk. */ |
| 1056 | if (chunk && (freed >= needed)) { | 1107 | if (chunk && (freed >= needed)) { |
| 1057 | __u32 tsn; | 1108 | int retval; |
| 1058 | tsn = ntohl(chunk->subh.data_hdr->tsn); | 1109 | retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); |
| 1059 | sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); | 1110 | /* |
| 1060 | sctp_ulpq_tail_data(ulpq, chunk, gfp); | 1111 | * Enter partial delivery if chunk has not been |
| 1061 | 1112 | * delivered; otherwise, drain the reassembly queue. | |
| 1062 | sctp_ulpq_partial_delivery(ulpq, gfp); | 1113 | */ |
| 1114 | if (retval <= 0) | ||
| 1115 | sctp_ulpq_partial_delivery(ulpq, gfp); | ||
| 1116 | else if (retval == 1) | ||
| 1117 | sctp_ulpq_reasm_drain(ulpq); | ||
| 1063 | } | 1118 | } |
| 1064 | 1119 | ||
| 1065 | sk_mem_reclaim(asoc->base.sk); | 1120 | sk_mem_reclaim(asoc->base.sk); |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index f7d34e7b6f81..5ead60550895 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 447 | else { | 447 | else { |
| 448 | int N, i; | 448 | int N, i; |
| 449 | 449 | ||
| 450 | /* | ||
| 451 | * NOTE: we skip uid_valid()/gid_valid() checks here: | ||
| 452 | * instead, * -1 id's are later mapped to the | ||
| 453 | * (export-specific) anonymous id by nfsd_setuser. | ||
| 454 | * | ||
| 455 | * (But supplementary gid's get no such special | ||
| 456 | * treatment so are checked for validity here.) | ||
| 457 | */ | ||
| 450 | /* uid */ | 458 | /* uid */ |
| 451 | rsci.cred.cr_uid = make_kuid(&init_user_ns, id); | 459 | rsci.cred.cr_uid = make_kuid(&init_user_ns, id); |
| 452 | if (!uid_valid(rsci.cred.cr_uid)) | ||
| 453 | goto out; | ||
| 454 | 460 | ||
| 455 | /* gid */ | 461 | /* gid */ |
| 456 | if (get_int(&mesg, &id)) | 462 | if (get_int(&mesg, &id)) |
| 457 | goto out; | 463 | goto out; |
| 458 | rsci.cred.cr_gid = make_kgid(&init_user_ns, id); | 464 | rsci.cred.cr_gid = make_kgid(&init_user_ns, id); |
| 459 | if (!gid_valid(rsci.cred.cr_gid)) | ||
| 460 | goto out; | ||
| 461 | 465 | ||
| 462 | /* number of additional gid's */ | 466 | /* number of additional gid's */ |
| 463 | if (get_int(&mesg, &N)) | 467 | if (get_int(&mesg, &N)) |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf6..d5f35f15af98 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru | |||
| 304 | err = rpciod_up(); | 304 | err = rpciod_up(); |
| 305 | if (err) | 305 | if (err) |
| 306 | goto out_no_rpciod; | 306 | goto out_no_rpciod; |
| 307 | err = -EINVAL; | ||
| 308 | if (!xprt) | ||
| 309 | goto out_no_xprt; | ||
| 310 | 307 | ||
| 308 | err = -EINVAL; | ||
| 311 | if (args->version >= program->nrvers) | 309 | if (args->version >= program->nrvers) |
| 312 | goto out_err; | 310 | goto out_err; |
| 313 | version = program->version[args->version]; | 311 | version = program->version[args->version]; |
| @@ -382,10 +380,9 @@ out_no_principal: | |||
| 382 | out_no_stats: | 380 | out_no_stats: |
| 383 | kfree(clnt); | 381 | kfree(clnt); |
| 384 | out_err: | 382 | out_err: |
| 385 | xprt_put(xprt); | ||
| 386 | out_no_xprt: | ||
| 387 | rpciod_down(); | 383 | rpciod_down(); |
| 388 | out_no_rpciod: | 384 | out_no_rpciod: |
| 385 | xprt_put(xprt); | ||
| 389 | return ERR_PTR(err); | 386 | return ERR_PTR(err); |
| 390 | } | 387 | } |
| 391 | 388 | ||
| @@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
| 512 | new = rpc_new_client(args, xprt); | 509 | new = rpc_new_client(args, xprt); |
| 513 | if (IS_ERR(new)) { | 510 | if (IS_ERR(new)) { |
| 514 | err = PTR_ERR(new); | 511 | err = PTR_ERR(new); |
| 515 | goto out_put; | 512 | goto out_err; |
| 516 | } | 513 | } |
| 517 | 514 | ||
| 518 | atomic_inc(&clnt->cl_count); | 515 | atomic_inc(&clnt->cl_count); |
| @@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
| 525 | new->cl_chatty = clnt->cl_chatty; | 522 | new->cl_chatty = clnt->cl_chatty; |
| 526 | return new; | 523 | return new; |
| 527 | 524 | ||
| 528 | out_put: | ||
| 529 | xprt_put(xprt); | ||
| 530 | out_err: | 525 | out_err: |
| 531 | dprintk("RPC: %s: returned error %d\n", __func__, err); | 526 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
| 532 | return ERR_PTR(err); | 527 | return ERR_PTR(err); |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 7b9b40224a27..a9129f8d7070 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -1174,6 +1174,8 @@ static struct file_system_type rpc_pipe_fs_type = { | |||
| 1174 | .mount = rpc_mount, | 1174 | .mount = rpc_mount, |
| 1175 | .kill_sb = rpc_kill_sb, | 1175 | .kill_sb = rpc_kill_sb, |
| 1176 | }; | 1176 | }; |
| 1177 | MODULE_ALIAS_FS("rpc_pipefs"); | ||
| 1178 | MODULE_ALIAS("rpc_pipefs"); | ||
| 1177 | 1179 | ||
| 1178 | static void | 1180 | static void |
| 1179 | init_once(void *foo) | 1181 | init_once(void *foo) |
| @@ -1218,6 +1220,3 @@ void unregister_rpc_pipefs(void) | |||
| 1218 | kmem_cache_destroy(rpc_inode_cachep); | 1220 | kmem_cache_destroy(rpc_inode_cachep); |
| 1219 | unregister_filesystem(&rpc_pipe_fs_type); | 1221 | unregister_filesystem(&rpc_pipe_fs_type); |
| 1220 | } | 1222 | } |
| 1221 | |||
| 1222 | /* Make 'mount -t rpc_pipefs ...' autoload this module. */ | ||
| 1223 | MODULE_ALIAS("rpc_pipefs"); | ||
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fb20f25ddec9..f8529fc8e542 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
| @@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, | |||
| 180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | 180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); |
| 181 | task->tk_waitqueue = queue; | 181 | task->tk_waitqueue = queue; |
| 182 | queue->qlen++; | 182 | queue->qlen++; |
| 183 | /* barrier matches the read in rpc_wake_up_task_queue_locked() */ | ||
| 184 | smp_wmb(); | ||
| 183 | rpc_set_queued(task); | 185 | rpc_set_queued(task); |
| 184 | 186 | ||
| 185 | dprintk("RPC: %5u added to queue %p \"%s\"\n", | 187 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
| @@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task | |||
| 430 | */ | 432 | */ |
| 431 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) | 433 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) |
| 432 | { | 434 | { |
| 433 | if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) | 435 | if (RPC_IS_QUEUED(task)) { |
| 434 | __rpc_do_wake_up_task(queue, task); | 436 | smp_rmb(); |
| 437 | if (task->tk_waitqueue == queue) | ||
| 438 | __rpc_do_wake_up_task(queue, task); | ||
| 439 | } | ||
| 435 | } | 440 | } |
| 436 | 441 | ||
| 437 | /* | 442 | /* |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c1d8476b7692..3d02130828da 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt) | |||
| 849 | xs_tcp_shutdown(xprt); | 849 | xs_tcp_shutdown(xprt); |
| 850 | } | 850 | } |
| 851 | 851 | ||
| 852 | static void xs_local_destroy(struct rpc_xprt *xprt) | ||
| 853 | { | ||
| 854 | xs_close(xprt); | ||
| 855 | xs_free_peer_addresses(xprt); | ||
| 856 | xprt_free(xprt); | ||
| 857 | module_put(THIS_MODULE); | ||
| 858 | } | ||
| 859 | |||
| 852 | /** | 860 | /** |
| 853 | * xs_destroy - prepare to shutdown a transport | 861 | * xs_destroy - prepare to shutdown a transport |
| 854 | * @xprt: doomed transport | 862 | * @xprt: doomed transport |
| @@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt) | |||
| 862 | 870 | ||
| 863 | cancel_delayed_work_sync(&transport->connect_worker); | 871 | cancel_delayed_work_sync(&transport->connect_worker); |
| 864 | 872 | ||
| 865 | xs_close(xprt); | 873 | xs_local_destroy(xprt); |
| 866 | xs_free_peer_addresses(xprt); | ||
| 867 | xprt_free(xprt); | ||
| 868 | module_put(THIS_MODULE); | ||
| 869 | } | 874 | } |
| 870 | 875 | ||
| 871 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | 876 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) |
| @@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = { | |||
| 2482 | .send_request = xs_local_send_request, | 2487 | .send_request = xs_local_send_request, |
| 2483 | .set_retrans_timeout = xprt_set_retrans_timeout_def, | 2488 | .set_retrans_timeout = xprt_set_retrans_timeout_def, |
| 2484 | .close = xs_close, | 2489 | .close = xs_close, |
| 2485 | .destroy = xs_destroy, | 2490 | .destroy = xs_local_destroy, |
| 2486 | .print_stats = xs_local_print_stats, | 2491 | .print_stats = xs_local_print_stats, |
| 2487 | }; | 2492 | }; |
| 2488 | 2493 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd916..515ce38e4f4c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) | |||
| 790 | if (addr) { | 790 | if (addr) { |
| 791 | addr->family = AF_TIPC; | 791 | addr->family = AF_TIPC; |
| 792 | addr->addrtype = TIPC_ADDR_ID; | 792 | addr->addrtype = TIPC_ADDR_ID; |
| 793 | memset(&addr->addr, 0, sizeof(addr->addr)); | ||
| 793 | addr->addr.id.ref = msg_origport(msg); | 794 | addr->addr.id.ref = msg_origport(msg); |
| 794 | addr->addr.id.node = msg_orignode(msg); | 795 | addr->addr.id.node = msg_orignode(msg); |
| 795 | addr->addr.name.domain = 0; /* could leave uninitialized */ | 796 | addr->addr.name.domain = 0; /* could leave uninitialized */ |
| @@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, | |||
| 904 | goto exit; | 905 | goto exit; |
| 905 | } | 906 | } |
| 906 | 907 | ||
| 908 | /* will be updated in set_orig_addr() if needed */ | ||
| 909 | m->msg_namelen = 0; | ||
| 910 | |||
| 907 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 911 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
| 908 | restart: | 912 | restart: |
| 909 | 913 | ||
| @@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, | |||
| 1013 | goto exit; | 1017 | goto exit; |
| 1014 | } | 1018 | } |
| 1015 | 1019 | ||
| 1020 | /* will be updated in set_orig_addr() if needed */ | ||
| 1021 | m->msg_namelen = 0; | ||
| 1022 | |||
| 1016 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); | 1023 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); |
| 1017 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 1024 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
| 1018 | 1025 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51be64f163ec..2db702d82e7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk) | |||
| 382 | #endif | 382 | #endif |
| 383 | } | 383 | } |
| 384 | 384 | ||
| 385 | static int unix_release_sock(struct sock *sk, int embrion) | 385 | static void unix_release_sock(struct sock *sk, int embrion) |
| 386 | { | 386 | { |
| 387 | struct unix_sock *u = unix_sk(sk); | 387 | struct unix_sock *u = unix_sk(sk); |
| 388 | struct path path; | 388 | struct path path; |
| @@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion) | |||
| 451 | 451 | ||
| 452 | if (unix_tot_inflight) | 452 | if (unix_tot_inflight) |
| 453 | unix_gc(); /* Garbage collect fds */ | 453 | unix_gc(); /* Garbage collect fds */ |
| 454 | |||
| 455 | return 0; | ||
| 456 | } | 454 | } |
| 457 | 455 | ||
| 458 | static void init_peercred(struct sock *sk) | 456 | static void init_peercred(struct sock *sk) |
| @@ -699,9 +697,10 @@ static int unix_release(struct socket *sock) | |||
| 699 | if (!sk) | 697 | if (!sk) |
| 700 | return 0; | 698 | return 0; |
| 701 | 699 | ||
| 700 | unix_release_sock(sk, 0); | ||
| 702 | sock->sk = NULL; | 701 | sock->sk = NULL; |
| 703 | 702 | ||
| 704 | return unix_release_sock(sk, 0); | 703 | return 0; |
| 705 | } | 704 | } |
| 706 | 705 | ||
| 707 | static int unix_autobind(struct socket *sock) | 706 | static int unix_autobind(struct socket *sock) |
| @@ -1994,7 +1993,7 @@ again: | |||
| 1994 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1993 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
| 1995 | (UNIXCB(skb).cred != siocb->scm->cred)) | 1994 | (UNIXCB(skb).cred != siocb->scm->cred)) |
| 1996 | break; | 1995 | break; |
| 1997 | } else { | 1996 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
| 1998 | /* Copy credentials */ | 1997 | /* Copy credentials */ |
| 1999 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); | 1998 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
| 2000 | check_creds = 1; | 1999 | check_creds = 1; |
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388a..7f93e2a42d7a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
| @@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) | |||
| 207 | struct vsock_sock *vsk; | 207 | struct vsock_sock *vsk; |
| 208 | 208 | ||
| 209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) | 209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) |
| 210 | if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) | 210 | if (addr->svm_port == vsk->local_addr.svm_port) |
| 211 | return sk_vsock(vsk); | 211 | return sk_vsock(vsk); |
| 212 | 212 | ||
| 213 | return NULL; | 213 | return NULL; |
| @@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, | |||
| 220 | 220 | ||
| 221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), | 221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), |
| 222 | connected_table) { | 222 | connected_table) { |
| 223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) | 223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) && |
| 224 | && vsock_addr_equals_addr(dst, &vsk->local_addr)) { | 224 | dst->svm_port == vsk->local_addr.svm_port) { |
| 225 | return sk_vsock(vsk); | 225 | return sk_vsock(vsk); |
| 226 | } | 226 | } |
| 227 | } | 227 | } |
| @@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb, | |||
| 1670 | vsk = vsock_sk(sk); | 1670 | vsk = vsock_sk(sk); |
| 1671 | err = 0; | 1671 | err = 0; |
| 1672 | 1672 | ||
| 1673 | msg->msg_namelen = 0; | ||
| 1674 | |||
| 1673 | lock_sock(sk); | 1675 | lock_sock(sk); |
| 1674 | 1676 | ||
| 1675 | if (sk->sk_state != SS_CONNECTED) { | 1677 | if (sk->sk_state != SS_CONNECTED) { |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a153..5e04d3d96285 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
| @@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending( | |||
| 464 | struct vsock_sock *vlistener; | 464 | struct vsock_sock *vlistener; |
| 465 | struct vsock_sock *vpending; | 465 | struct vsock_sock *vpending; |
| 466 | struct sock *pending; | 466 | struct sock *pending; |
| 467 | struct sockaddr_vm src; | ||
| 468 | |||
| 469 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
| 467 | 470 | ||
| 468 | vlistener = vsock_sk(listener); | 471 | vlistener = vsock_sk(listener); |
| 469 | 472 | ||
| 470 | list_for_each_entry(vpending, &vlistener->pending_links, | 473 | list_for_each_entry(vpending, &vlistener->pending_links, |
| 471 | pending_links) { | 474 | pending_links) { |
| 472 | struct sockaddr_vm src; | ||
| 473 | struct sockaddr_vm dst; | ||
| 474 | |||
| 475 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
| 476 | vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); | ||
| 477 | |||
| 478 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && | 475 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && |
| 479 | vsock_addr_equals_addr(&dst, &vpending->local_addr)) { | 476 | pkt->dst_port == vpending->local_addr.svm_port) { |
| 480 | pending = sk_vsock(vpending); | 477 | pending = sk_vsock(vpending); |
| 481 | sock_hold(pending); | 478 | sock_hold(pending); |
| 482 | goto found; | 479 | goto found; |
| @@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg) | |||
| 739 | */ | 736 | */ |
| 740 | bh_lock_sock(sk); | 737 | bh_lock_sock(sk); |
| 741 | 738 | ||
| 742 | if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) | 739 | if (!sock_owned_by_user(sk)) { |
| 743 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | 740 | /* The local context ID may be out of date, update it. */ |
| 744 | sk, pkt, true, &dst, &src, | 741 | vsk->local_addr.svm_cid = dst.svm_cid; |
| 745 | &bh_process_pkt); | 742 | |
| 743 | if (sk->sk_state == SS_CONNECTED) | ||
| 744 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | ||
| 745 | sk, pkt, true, &dst, &src, | ||
| 746 | &bh_process_pkt); | ||
| 747 | } | ||
| 746 | 748 | ||
| 747 | bh_unlock_sock(sk); | 749 | bh_unlock_sock(sk); |
| 748 | 750 | ||
| @@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work) | |||
| 902 | 904 | ||
| 903 | lock_sock(sk); | 905 | lock_sock(sk); |
| 904 | 906 | ||
| 907 | /* The local context ID may be out of date. */ | ||
| 908 | vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; | ||
| 909 | |||
| 905 | switch (sk->sk_state) { | 910 | switch (sk->sk_state) { |
| 906 | case SS_LISTEN: | 911 | case SS_LISTEN: |
| 907 | vmci_transport_recv_listen(sk, pkt); | 912 | vmci_transport_recv_listen(sk, pkt); |
| @@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk, | |||
| 958 | pending = vmci_transport_get_pending(sk, pkt); | 963 | pending = vmci_transport_get_pending(sk, pkt); |
| 959 | if (pending) { | 964 | if (pending) { |
| 960 | lock_sock(pending); | 965 | lock_sock(pending); |
| 966 | |||
| 967 | /* The local context ID may be out of date. */ | ||
| 968 | vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; | ||
| 969 | |||
| 961 | switch (pending->sk_state) { | 970 | switch (pending->sk_state) { |
| 962 | case SS_CONNECTING: | 971 | case SS_CONNECTING: |
| 963 | err = vmci_transport_recv_connecting_server(sk, | 972 | err = vmci_transport_recv_connecting_server(sk, |
| @@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
| 1727 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) | 1736 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) |
| 1728 | return -EOPNOTSUPP; | 1737 | return -EOPNOTSUPP; |
| 1729 | 1738 | ||
| 1739 | msg->msg_namelen = 0; | ||
| 1740 | |||
| 1730 | /* Retrieve the head sk_buff from the socket's receive queue. */ | 1741 | /* Retrieve the head sk_buff from the socket's receive queue. */ |
| 1731 | err = 0; | 1742 | err = 0; |
| 1732 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); | 1743 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); |
| @@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
| 1759 | if (err) | 1770 | if (err) |
| 1760 | goto out; | 1771 | goto out; |
| 1761 | 1772 | ||
| 1762 | msg->msg_namelen = 0; | ||
| 1763 | if (msg->msg_name) { | 1773 | if (msg->msg_name) { |
| 1764 | struct sockaddr_vm *vm_addr; | 1774 | struct sockaddr_vm *vm_addr; |
| 1765 | 1775 | ||
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c59..ec2611b4ea0e 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c | |||
| @@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | |||
| 64 | } | 64 | } |
| 65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); | 65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); |
| 66 | 66 | ||
| 67 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
| 68 | const struct sockaddr_vm *other) | ||
| 69 | { | ||
| 70 | return (addr->svm_cid == VMADDR_CID_ANY || | ||
| 71 | other->svm_cid == VMADDR_CID_ANY || | ||
| 72 | addr->svm_cid == other->svm_cid) && | ||
| 73 | addr->svm_port == other->svm_port; | ||
| 74 | } | ||
| 75 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); | ||
| 76 | |||
| 77 | int vsock_addr_cast(const struct sockaddr *addr, | 67 | int vsock_addr_cast(const struct sockaddr *addr, |
| 78 | size_t len, struct sockaddr_vm **out_addr) | 68 | size_t len, struct sockaddr_vm **out_addr) |
| 79 | { | 69 | { |
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf843..9ccd5316eac0 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h | |||
| @@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr); | |||
| 24 | void vsock_addr_unbind(struct sockaddr_vm *addr); | 24 | void vsock_addr_unbind(struct sockaddr_vm *addr); |
| 25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | 25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, |
| 26 | const struct sockaddr_vm *other); | 26 | const struct sockaddr_vm *other); |
| 27 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
| 28 | const struct sockaddr_vm *other); | ||
| 29 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, | 27 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, |
| 30 | struct sockaddr_vm **out_addr); | 28 | struct sockaddr_vm **out_addr); |
| 31 | 29 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b017..6ddf74f0ae1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | |||
| 212 | rdev_rfkill_poll(rdev); | 212 | rdev_rfkill_poll(rdev); |
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
| 216 | struct wireless_dev *wdev) | ||
| 217 | { | ||
| 218 | lockdep_assert_held(&rdev->devlist_mtx); | ||
| 219 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
| 220 | |||
| 221 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) | ||
| 222 | return; | ||
| 223 | |||
| 224 | if (!wdev->p2p_started) | ||
| 225 | return; | ||
| 226 | |||
| 227 | rdev_stop_p2p_device(rdev, wdev); | ||
| 228 | wdev->p2p_started = false; | ||
| 229 | |||
| 230 | rdev->opencount--; | ||
| 231 | |||
| 232 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { | ||
| 233 | bool busy = work_busy(&rdev->scan_done_wk); | ||
| 234 | |||
| 235 | /* | ||
| 236 | * If the work isn't pending or running (in which case it would | ||
| 237 | * be waiting for the lock we hold) the driver didn't properly | ||
| 238 | * cancel the scan when the interface was removed. In this case | ||
| 239 | * warn and leak the scan request object to not crash later. | ||
| 240 | */ | ||
| 241 | WARN_ON(!busy); | ||
| 242 | |||
| 243 | rdev->scan_req->aborted = true; | ||
| 244 | ___cfg80211_scan_done(rdev, !busy); | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 215 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 248 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
| 216 | { | 249 | { |
| 217 | struct cfg80211_registered_device *rdev = data; | 250 | struct cfg80211_registered_device *rdev = data; |
| @@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
| 221 | return 0; | 254 | return 0; |
| 222 | 255 | ||
| 223 | rtnl_lock(); | 256 | rtnl_lock(); |
| 224 | mutex_lock(&rdev->devlist_mtx); | 257 | |
| 258 | /* read-only iteration need not hold the devlist_mtx */ | ||
| 225 | 259 | ||
| 226 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 260 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
| 227 | if (wdev->netdev) { | 261 | if (wdev->netdev) { |
| @@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
| 231 | /* otherwise, check iftype */ | 265 | /* otherwise, check iftype */ |
| 232 | switch (wdev->iftype) { | 266 | switch (wdev->iftype) { |
| 233 | case NL80211_IFTYPE_P2P_DEVICE: | 267 | case NL80211_IFTYPE_P2P_DEVICE: |
| 234 | if (!wdev->p2p_started) | 268 | /* but this requires it */ |
| 235 | break; | 269 | mutex_lock(&rdev->devlist_mtx); |
| 236 | rdev_stop_p2p_device(rdev, wdev); | 270 | mutex_lock(&rdev->sched_scan_mtx); |
| 237 | wdev->p2p_started = false; | 271 | cfg80211_stop_p2p_device(rdev, wdev); |
| 238 | rdev->opencount--; | 272 | mutex_unlock(&rdev->sched_scan_mtx); |
| 273 | mutex_unlock(&rdev->devlist_mtx); | ||
| 239 | break; | 274 | break; |
| 240 | default: | 275 | default: |
| 241 | break; | 276 | break; |
| 242 | } | 277 | } |
| 243 | } | 278 | } |
| 244 | 279 | ||
| 245 | mutex_unlock(&rdev->devlist_mtx); | ||
| 246 | rtnl_unlock(); | 280 | rtnl_unlock(); |
| 247 | 281 | ||
| 248 | return 0; | 282 | return 0; |
| @@ -367,8 +401,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) | |||
| 367 | rdev->wiphy.rts_threshold = (u32) -1; | 401 | rdev->wiphy.rts_threshold = (u32) -1; |
| 368 | rdev->wiphy.coverage_class = 0; | 402 | rdev->wiphy.coverage_class = 0; |
| 369 | 403 | ||
| 370 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | | 404 | rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; |
| 371 | NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; | ||
| 372 | 405 | ||
| 373 | return &rdev->wiphy; | 406 | return &rdev->wiphy; |
| 374 | } | 407 | } |
| @@ -746,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
| 746 | wdev = container_of(work, struct wireless_dev, cleanup_work); | 779 | wdev = container_of(work, struct wireless_dev, cleanup_work); |
| 747 | rdev = wiphy_to_dev(wdev->wiphy); | 780 | rdev = wiphy_to_dev(wdev->wiphy); |
| 748 | 781 | ||
| 749 | cfg80211_lock_rdev(rdev); | 782 | mutex_lock(&rdev->sched_scan_mtx); |
| 750 | 783 | ||
| 751 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | 784 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { |
| 752 | rdev->scan_req->aborted = true; | 785 | rdev->scan_req->aborted = true; |
| 753 | ___cfg80211_scan_done(rdev, true); | 786 | ___cfg80211_scan_done(rdev, true); |
| 754 | } | 787 | } |
| 755 | 788 | ||
| 756 | cfg80211_unlock_rdev(rdev); | ||
| 757 | |||
| 758 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 759 | |||
| 760 | if (WARN_ON(rdev->sched_scan_req && | 789 | if (WARN_ON(rdev->sched_scan_req && |
| 761 | rdev->sched_scan_req->dev == wdev->netdev)) { | 790 | rdev->sched_scan_req->dev == wdev->netdev)) { |
| 762 | __cfg80211_stop_sched_scan(rdev, false); | 791 | __cfg80211_stop_sched_scan(rdev, false); |
| @@ -782,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
| 782 | return; | 811 | return; |
| 783 | 812 | ||
| 784 | mutex_lock(&rdev->devlist_mtx); | 813 | mutex_lock(&rdev->devlist_mtx); |
| 814 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 785 | list_del_rcu(&wdev->list); | 815 | list_del_rcu(&wdev->list); |
| 786 | rdev->devlist_generation++; | 816 | rdev->devlist_generation++; |
| 787 | 817 | ||
| 788 | switch (wdev->iftype) { | 818 | switch (wdev->iftype) { |
| 789 | case NL80211_IFTYPE_P2P_DEVICE: | 819 | case NL80211_IFTYPE_P2P_DEVICE: |
| 790 | if (!wdev->p2p_started) | 820 | cfg80211_stop_p2p_device(rdev, wdev); |
| 791 | break; | ||
| 792 | rdev_stop_p2p_device(rdev, wdev); | ||
| 793 | wdev->p2p_started = false; | ||
| 794 | rdev->opencount--; | ||
| 795 | break; | 821 | break; |
| 796 | default: | 822 | default: |
| 797 | WARN_ON_ONCE(1); | 823 | WARN_ON_ONCE(1); |
| 798 | break; | 824 | break; |
| 799 | } | 825 | } |
| 826 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 800 | mutex_unlock(&rdev->devlist_mtx); | 827 | mutex_unlock(&rdev->devlist_mtx); |
| 801 | } | 828 | } |
| 802 | EXPORT_SYMBOL(cfg80211_unregister_wdev); | 829 | EXPORT_SYMBOL(cfg80211_unregister_wdev); |
| @@ -937,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
| 937 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); | 964 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); |
| 938 | cfg80211_lock_rdev(rdev); | 965 | cfg80211_lock_rdev(rdev); |
| 939 | mutex_lock(&rdev->devlist_mtx); | 966 | mutex_lock(&rdev->devlist_mtx); |
| 967 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 940 | wdev_lock(wdev); | 968 | wdev_lock(wdev); |
| 941 | switch (wdev->iftype) { | 969 | switch (wdev->iftype) { |
| 942 | #ifdef CONFIG_CFG80211_WEXT | 970 | #ifdef CONFIG_CFG80211_WEXT |
| @@ -968,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
| 968 | break; | 996 | break; |
| 969 | } | 997 | } |
| 970 | wdev_unlock(wdev); | 998 | wdev_unlock(wdev); |
| 999 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 971 | rdev->opencount++; | 1000 | rdev->opencount++; |
| 972 | mutex_unlock(&rdev->devlist_mtx); | 1001 | mutex_unlock(&rdev->devlist_mtx); |
| 973 | cfg80211_unlock_rdev(rdev); | 1002 | cfg80211_unlock_rdev(rdev); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8a..5845c2b37aa8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
| @@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
| 503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
| 504 | enum nl80211_iftype iftype, int num); | 504 | enum nl80211_iftype iftype, int num); |
| 505 | 505 | ||
| 506 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
| 507 | struct wireless_dev *wdev); | ||
| 508 | |||
| 506 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 509 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
| 507 | 510 | ||
| 508 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 511 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 35545ccc30fd..58e13a8c95f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -554,27 +554,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, | |||
| 554 | if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && | 554 | if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && |
| 555 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) | 555 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) |
| 556 | goto nla_put_failure; | 556 | goto nla_put_failure; |
| 557 | if (chan->flags & IEEE80211_CHAN_RADAR) { | 557 | if ((chan->flags & IEEE80211_CHAN_RADAR) && |
| 558 | u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered); | 558 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) |
| 559 | if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) | ||
| 560 | goto nla_put_failure; | ||
| 561 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE, | ||
| 562 | chan->dfs_state)) | ||
| 563 | goto nla_put_failure; | ||
| 564 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time)) | ||
| 565 | goto nla_put_failure; | ||
| 566 | } | ||
| 567 | if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && | ||
| 568 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) | ||
| 569 | goto nla_put_failure; | ||
| 570 | if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && | ||
| 571 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) | ||
| 572 | goto nla_put_failure; | ||
| 573 | if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && | ||
| 574 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) | ||
| 575 | goto nla_put_failure; | ||
| 576 | if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && | ||
| 577 | nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) | ||
| 578 | goto nla_put_failure; | 559 | goto nla_put_failure; |
| 579 | 560 | ||
| 580 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, | 561 | if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, |
| @@ -900,9 +881,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy, | |||
| 900 | nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, | 881 | nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, |
| 901 | c->max_interfaces)) | 882 | c->max_interfaces)) |
| 902 | goto nla_put_failure; | 883 | goto nla_put_failure; |
| 903 | if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, | ||
| 904 | c->radar_detect_widths)) | ||
| 905 | goto nla_put_failure; | ||
| 906 | 884 | ||
| 907 | nla_nest_end(msg, nl_combi); | 885 | nla_nest_end(msg, nl_combi); |
| 908 | } | 886 | } |
| @@ -914,48 +892,6 @@ nla_put_failure: | |||
| 914 | return -ENOBUFS; | 892 | return -ENOBUFS; |
| 915 | } | 893 | } |
| 916 | 894 | ||
| 917 | #ifdef CONFIG_PM | ||
| 918 | static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, | ||
| 919 | struct sk_buff *msg) | ||
| 920 | { | ||
| 921 | const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp; | ||
| 922 | struct nlattr *nl_tcp; | ||
| 923 | |||
| 924 | if (!tcp) | ||
| 925 | return 0; | ||
| 926 | |||
| 927 | nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); | ||
| 928 | if (!nl_tcp) | ||
| 929 | return -ENOBUFS; | ||
| 930 | |||
| 931 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, | ||
| 932 | tcp->data_payload_max)) | ||
| 933 | return -ENOBUFS; | ||
| 934 | |||
| 935 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, | ||
| 936 | tcp->data_payload_max)) | ||
| 937 | return -ENOBUFS; | ||
| 938 | |||
| 939 | if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ)) | ||
| 940 | return -ENOBUFS; | ||
| 941 | |||
| 942 | if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, | ||
| 943 | sizeof(*tcp->tok), tcp->tok)) | ||
| 944 | return -ENOBUFS; | ||
| 945 | |||
| 946 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, | ||
| 947 | tcp->data_interval_max)) | ||
| 948 | return -ENOBUFS; | ||
| 949 | |||
| 950 | if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, | ||
| 951 | tcp->wake_payload_max)) | ||
| 952 | return -ENOBUFS; | ||
| 953 | |||
| 954 | nla_nest_end(msg, nl_tcp); | ||
| 955 | return 0; | ||
| 956 | } | ||
| 957 | #endif | ||
| 958 | |||
| 959 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, | 895 | static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, |
| 960 | struct cfg80211_registered_device *dev) | 896 | struct cfg80211_registered_device *dev) |
| 961 | { | 897 | { |
| @@ -1330,9 +1266,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
| 1330 | goto nla_put_failure; | 1266 | goto nla_put_failure; |
| 1331 | } | 1267 | } |
| 1332 | 1268 | ||
| 1333 | if (nl80211_send_wowlan_tcp_caps(dev, msg)) | ||
| 1334 | goto nla_put_failure; | ||
| 1335 | |||
| 1336 | nla_nest_end(msg, nl_wowlan); | 1269 | nla_nest_end(msg, nl_wowlan); |
| 1337 | } | 1270 | } |
| 1338 | #endif | 1271 | #endif |
| @@ -1365,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
| 1365 | dev->wiphy.max_acl_mac_addrs)) | 1298 | dev->wiphy.max_acl_mac_addrs)) |
| 1366 | goto nla_put_failure; | 1299 | goto nla_put_failure; |
| 1367 | 1300 | ||
| 1368 | if (dev->wiphy.extended_capabilities && | ||
| 1369 | (nla_put(msg, NL80211_ATTR_EXT_CAPA, | ||
| 1370 | dev->wiphy.extended_capabilities_len, | ||
| 1371 | dev->wiphy.extended_capabilities) || | ||
| 1372 | nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, | ||
| 1373 | dev->wiphy.extended_capabilities_len, | ||
| 1374 | dev->wiphy.extended_capabilities_mask))) | ||
| 1375 | goto nla_put_failure; | ||
| 1376 | |||
| 1377 | return genlmsg_end(msg, hdr); | 1301 | return genlmsg_end(msg, hdr); |
| 1378 | 1302 | ||
| 1379 | nla_put_failure: | 1303 | nla_put_failure: |
| @@ -1383,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
| 1383 | 1307 | ||
| 1384 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | 1308 | static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) |
| 1385 | { | 1309 | { |
| 1386 | int idx = 0; | 1310 | int idx = 0, ret; |
| 1387 | int start = cb->args[0]; | 1311 | int start = cb->args[0]; |
| 1388 | struct cfg80211_registered_device *dev; | 1312 | struct cfg80211_registered_device *dev; |
| 1389 | 1313 | ||
| @@ -1393,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1393 | continue; | 1317 | continue; |
| 1394 | if (++idx <= start) | 1318 | if (++idx <= start) |
| 1395 | continue; | 1319 | continue; |
| 1396 | if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, | 1320 | ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, |
| 1397 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1321 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 1398 | dev) < 0) { | 1322 | dev); |
| 1323 | if (ret < 0) { | ||
| 1324 | /* | ||
| 1325 | * If sending the wiphy data didn't fit (ENOBUFS or | ||
| 1326 | * EMSGSIZE returned), this SKB is still empty (so | ||
| 1327 | * it's not too big because another wiphy dataset is | ||
| 1328 | * already in the skb) and we've not tried to adjust | ||
| 1329 | * the dump allocation yet ... then adjust the alloc | ||
| 1330 | * size to be bigger, and return 1 but with the empty | ||
| 1331 | * skb. This results in an empty message being RX'ed | ||
| 1332 | * in userspace, but that is ignored. | ||
| 1333 | * | ||
| 1334 | * We can then retry with the larger buffer. | ||
| 1335 | */ | ||
| 1336 | if ((ret == -ENOBUFS || ret == -EMSGSIZE) && | ||
| 1337 | !skb->len && | ||
| 1338 | cb->min_dump_alloc < 4096) { | ||
| 1339 | cb->min_dump_alloc = 4096; | ||
| 1340 | mutex_unlock(&cfg80211_mutex); | ||
| 1341 | return 1; | ||
| 1342 | } | ||
| 1399 | idx--; | 1343 | idx--; |
| 1400 | break; | 1344 | break; |
| 1401 | } | 1345 | } |
| @@ -1412,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) | |||
| 1412 | struct sk_buff *msg; | 1356 | struct sk_buff *msg; |
| 1413 | struct cfg80211_registered_device *dev = info->user_ptr[0]; | 1357 | struct cfg80211_registered_device *dev = info->user_ptr[0]; |
| 1414 | 1358 | ||
| 1415 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 1359 | msg = nlmsg_new(4096, GFP_KERNEL); |
| 1416 | if (!msg) | 1360 | if (!msg) |
| 1417 | return -ENOMEM; | 1361 | return -ENOMEM; |
| 1418 | 1362 | ||
| @@ -4758,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
| 4758 | if (!rdev->ops->scan) | 4702 | if (!rdev->ops->scan) |
| 4759 | return -EOPNOTSUPP; | 4703 | return -EOPNOTSUPP; |
| 4760 | 4704 | ||
| 4761 | if (rdev->scan_req) | 4705 | mutex_lock(&rdev->sched_scan_mtx); |
| 4762 | return -EBUSY; | 4706 | if (rdev->scan_req) { |
| 4707 | err = -EBUSY; | ||
| 4708 | goto unlock; | ||
| 4709 | } | ||
| 4763 | 4710 | ||
| 4764 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 4711 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
| 4765 | n_channels = validate_scan_freqs( | 4712 | n_channels = validate_scan_freqs( |
| 4766 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); | 4713 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); |
| 4767 | if (!n_channels) | 4714 | if (!n_channels) { |
| 4768 | return -EINVAL; | 4715 | err = -EINVAL; |
| 4716 | goto unlock; | ||
| 4717 | } | ||
| 4769 | } else { | 4718 | } else { |
| 4770 | enum ieee80211_band band; | 4719 | enum ieee80211_band band; |
| 4771 | n_channels = 0; | 4720 | n_channels = 0; |
| @@ -4779,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
| 4779 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) | 4728 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) |
| 4780 | n_ssids++; | 4729 | n_ssids++; |
| 4781 | 4730 | ||
| 4782 | if (n_ssids > wiphy->max_scan_ssids) | 4731 | if (n_ssids > wiphy->max_scan_ssids) { |
| 4783 | return -EINVAL; | 4732 | err = -EINVAL; |
| 4733 | goto unlock; | ||
| 4734 | } | ||
| 4784 | 4735 | ||
| 4785 | if (info->attrs[NL80211_ATTR_IE]) | 4736 | if (info->attrs[NL80211_ATTR_IE]) |
| 4786 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 4737 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
| 4787 | else | 4738 | else |
| 4788 | ie_len = 0; | 4739 | ie_len = 0; |
| 4789 | 4740 | ||
| 4790 | if (ie_len > wiphy->max_scan_ie_len) | 4741 | if (ie_len > wiphy->max_scan_ie_len) { |
| 4791 | return -EINVAL; | 4742 | err = -EINVAL; |
| 4743 | goto unlock; | ||
| 4744 | } | ||
| 4792 | 4745 | ||
| 4793 | request = kzalloc(sizeof(*request) | 4746 | request = kzalloc(sizeof(*request) |
| 4794 | + sizeof(*request->ssids) * n_ssids | 4747 | + sizeof(*request->ssids) * n_ssids |
| 4795 | + sizeof(*request->channels) * n_channels | 4748 | + sizeof(*request->channels) * n_channels |
| 4796 | + ie_len, GFP_KERNEL); | 4749 | + ie_len, GFP_KERNEL); |
| 4797 | if (!request) | 4750 | if (!request) { |
| 4798 | return -ENOMEM; | 4751 | err = -ENOMEM; |
| 4752 | goto unlock; | ||
| 4753 | } | ||
| 4799 | 4754 | ||
| 4800 | if (n_ssids) | 4755 | if (n_ssids) |
| 4801 | request->ssids = (void *)&request->channels[n_channels]; | 4756 | request->ssids = (void *)&request->channels[n_channels]; |
| @@ -4932,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
| 4932 | kfree(request); | 4887 | kfree(request); |
| 4933 | } | 4888 | } |
| 4934 | 4889 | ||
| 4890 | unlock: | ||
| 4891 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 4935 | return err; | 4892 | return err; |
| 4936 | } | 4893 | } |
| 4937 | 4894 | ||
| @@ -7805,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
| 7805 | if (!rdev->ops->stop_p2p_device) | 7762 | if (!rdev->ops->stop_p2p_device) |
| 7806 | return -EOPNOTSUPP; | 7763 | return -EOPNOTSUPP; |
| 7807 | 7764 | ||
| 7808 | if (!wdev->p2p_started) | 7765 | mutex_lock(&rdev->sched_scan_mtx); |
| 7809 | return 0; | 7766 | cfg80211_stop_p2p_device(rdev, wdev); |
| 7810 | 7767 | mutex_unlock(&rdev->sched_scan_mtx); | |
| 7811 | rdev_stop_p2p_device(rdev, wdev); | ||
| 7812 | wdev->p2p_started = false; | ||
| 7813 | |||
| 7814 | mutex_lock(&rdev->devlist_mtx); | ||
| 7815 | rdev->opencount--; | ||
| 7816 | mutex_unlock(&rdev->devlist_mtx); | ||
| 7817 | |||
| 7818 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | ||
| 7819 | rdev->scan_req->aborted = true; | ||
| 7820 | ___cfg80211_scan_done(rdev, true); | ||
| 7821 | } | ||
| 7822 | 7768 | ||
| 7823 | return 0; | 7769 | return 0; |
| 7824 | } | 7770 | } |
| @@ -8542,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
| 8542 | struct nlattr *nest; | 8488 | struct nlattr *nest; |
| 8543 | int i; | 8489 | int i; |
| 8544 | 8490 | ||
| 8545 | ASSERT_RDEV_LOCK(rdev); | 8491 | lockdep_assert_held(&rdev->sched_scan_mtx); |
| 8546 | 8492 | ||
| 8547 | if (WARN_ON(!req)) | 8493 | if (WARN_ON(!req)) |
| 8548 | return 0; | 8494 | return 0; |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca0079..fd99ea495b7e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
| @@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
| 169 | union iwreq_data wrqu; | 169 | union iwreq_data wrqu; |
| 170 | #endif | 170 | #endif |
| 171 | 171 | ||
| 172 | ASSERT_RDEV_LOCK(rdev); | 172 | lockdep_assert_held(&rdev->sched_scan_mtx); |
| 173 | 173 | ||
| 174 | request = rdev->scan_req; | 174 | request = rdev->scan_req; |
| 175 | 175 | ||
| @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
| 230 | rdev = container_of(wk, struct cfg80211_registered_device, | 230 | rdev = container_of(wk, struct cfg80211_registered_device, |
| 231 | scan_done_wk); | 231 | scan_done_wk); |
| 232 | 232 | ||
| 233 | cfg80211_lock_rdev(rdev); | 233 | mutex_lock(&rdev->sched_scan_mtx); |
| 234 | ___cfg80211_scan_done(rdev, false); | 234 | ___cfg80211_scan_done(rdev, false); |
| 235 | cfg80211_unlock_rdev(rdev); | 235 | mutex_unlock(&rdev->sched_scan_mtx); |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) |
| @@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
| 698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); | 698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); |
| 699 | 699 | ||
| 700 | if (found) { | 700 | if (found) { |
| 701 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
| 702 | found->pub.signal = tmp->pub.signal; | ||
| 703 | found->pub.capability = tmp->pub.capability; | ||
| 704 | found->ts = tmp->ts; | ||
| 705 | |||
| 706 | /* Update IEs */ | 701 | /* Update IEs */ |
| 707 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { | 702 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { |
| 708 | const struct cfg80211_bss_ies *old; | 703 | const struct cfg80211_bss_ies *old; |
| @@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
| 723 | 718 | ||
| 724 | if (found->pub.hidden_beacon_bss && | 719 | if (found->pub.hidden_beacon_bss && |
| 725 | !list_empty(&found->hidden_list)) { | 720 | !list_empty(&found->hidden_list)) { |
| 721 | const struct cfg80211_bss_ies *f; | ||
| 722 | |||
| 726 | /* | 723 | /* |
| 727 | * The found BSS struct is one of the probe | 724 | * The found BSS struct is one of the probe |
| 728 | * response members of a group, but we're | 725 | * response members of a group, but we're |
| @@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
| 732 | * SSID to showing it, which is confusing so | 729 | * SSID to showing it, which is confusing so |
| 733 | * drop this information. | 730 | * drop this information. |
| 734 | */ | 731 | */ |
| 732 | |||
| 733 | f = rcu_access_pointer(tmp->pub.beacon_ies); | ||
| 734 | kfree_rcu((struct cfg80211_bss_ies *)f, | ||
| 735 | rcu_head); | ||
| 735 | goto drop; | 736 | goto drop; |
| 736 | } | 737 | } |
| 737 | 738 | ||
| @@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
| 761 | kfree_rcu((struct cfg80211_bss_ies *)old, | 762 | kfree_rcu((struct cfg80211_bss_ies *)old, |
| 762 | rcu_head); | 763 | rcu_head); |
| 763 | } | 764 | } |
| 765 | |||
| 766 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
| 767 | found->pub.signal = tmp->pub.signal; | ||
| 768 | found->pub.capability = tmp->pub.capability; | ||
| 769 | found->ts = tmp->ts; | ||
| 764 | } else { | 770 | } else { |
| 765 | struct cfg80211_internal_bss *new; | 771 | struct cfg80211_internal_bss *new; |
| 766 | struct cfg80211_internal_bss *hidden; | 772 | struct cfg80211_internal_bss *hidden; |
| @@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
| 1056 | if (IS_ERR(rdev)) | 1062 | if (IS_ERR(rdev)) |
| 1057 | return PTR_ERR(rdev); | 1063 | return PTR_ERR(rdev); |
| 1058 | 1064 | ||
| 1065 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 1059 | if (rdev->scan_req) { | 1066 | if (rdev->scan_req) { |
| 1060 | err = -EBUSY; | 1067 | err = -EBUSY; |
| 1061 | goto out; | 1068 | goto out; |
| @@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
| 1162 | dev_hold(dev); | 1169 | dev_hold(dev); |
| 1163 | } | 1170 | } |
| 1164 | out: | 1171 | out: |
| 1172 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 1165 | kfree(creq); | 1173 | kfree(creq); |
| 1166 | cfg80211_unlock_rdev(rdev); | 1174 | cfg80211_unlock_rdev(rdev); |
| 1167 | return err; | 1175 | return err; |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b1..482c70e70127 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
| @@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
| 85 | ASSERT_RTNL(); | 85 | ASSERT_RTNL(); |
| 86 | ASSERT_RDEV_LOCK(rdev); | 86 | ASSERT_RDEV_LOCK(rdev); |
| 87 | ASSERT_WDEV_LOCK(wdev); | 87 | ASSERT_WDEV_LOCK(wdev); |
| 88 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
| 88 | 89 | ||
| 89 | if (rdev->scan_req) | 90 | if (rdev->scan_req) |
| 90 | return -EBUSY; | 91 | return -EBUSY; |
| @@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
| 223 | rtnl_lock(); | 224 | rtnl_lock(); |
| 224 | cfg80211_lock_rdev(rdev); | 225 | cfg80211_lock_rdev(rdev); |
| 225 | mutex_lock(&rdev->devlist_mtx); | 226 | mutex_lock(&rdev->devlist_mtx); |
| 227 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 226 | 228 | ||
| 227 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 229 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
| 228 | wdev_lock(wdev); | 230 | wdev_lock(wdev); |
| @@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
| 247 | wdev_unlock(wdev); | 249 | wdev_unlock(wdev); |
| 248 | } | 250 | } |
| 249 | 251 | ||
| 252 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 250 | mutex_unlock(&rdev->devlist_mtx); | 253 | mutex_unlock(&rdev->devlist_mtx); |
| 251 | cfg80211_unlock_rdev(rdev); | 254 | cfg80211_unlock_rdev(rdev); |
| 252 | rtnl_unlock(); | 255 | rtnl_unlock(); |
| @@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev) | |||
| 320 | { | 323 | { |
| 321 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 324 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
| 322 | 325 | ||
| 323 | mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
| 324 | wdev_lock(wdev); | 326 | wdev_lock(wdev); |
| 325 | __cfg80211_sme_scan_done(dev); | 327 | __cfg80211_sme_scan_done(dev); |
| 326 | wdev_unlock(wdev); | 328 | wdev_unlock(wdev); |
| 327 | mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
| 328 | } | 329 | } |
| 329 | 330 | ||
| 330 | void cfg80211_sme_rx_auth(struct net_device *dev, | 331 | void cfg80211_sme_rx_auth(struct net_device *dev, |
| @@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
| 924 | int err; | 925 | int err; |
| 925 | 926 | ||
| 926 | mutex_lock(&rdev->devlist_mtx); | 927 | mutex_lock(&rdev->devlist_mtx); |
| 928 | /* might request scan - scan_mtx -> wdev_mtx dependency */ | ||
| 929 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 927 | wdev_lock(dev->ieee80211_ptr); | 930 | wdev_lock(dev->ieee80211_ptr); |
| 928 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); | 931 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); |
| 929 | wdev_unlock(dev->ieee80211_ptr); | 932 | wdev_unlock(dev->ieee80211_ptr); |
| 933 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 930 | mutex_unlock(&rdev->devlist_mtx); | 934 | mutex_unlock(&rdev->devlist_mtx); |
| 931 | 935 | ||
| 932 | return err; | 936 | return err; |
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e19..7586de77a2f8 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
| @@ -27,7 +27,8 @@ | |||
| 27 | #define WIPHY_PR_ARG __entry->wiphy_name | 27 | #define WIPHY_PR_ARG __entry->wiphy_name |
| 28 | 28 | ||
| 29 | #define WDEV_ENTRY __field(u32, id) | 29 | #define WDEV_ENTRY __field(u32, id) |
| 30 | #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) | 30 | #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ |
| 31 | ? wdev->identifier : 0) | ||
| 31 | #define WDEV_PR_FMT "wdev(%u)" | 32 | #define WDEV_PR_FMT "wdev(%u)" |
| 32 | #define WDEV_PR_ARG (__entry->id) | 33 | #define WDEV_PR_ARG (__entry->id) |
| 33 | 34 | ||
| @@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl, | |||
| 1778 | ), | 1779 | ), |
| 1779 | TP_fast_assign( | 1780 | TP_fast_assign( |
| 1780 | WIPHY_ASSIGN; | 1781 | WIPHY_ASSIGN; |
| 1781 | WIPHY_ASSIGN; | 1782 | NETDEV_ASSIGN; |
| 1782 | __entry->acl_policy = params->acl_policy; | 1783 | __entry->acl_policy = params->acl_policy; |
| 1783 | ), | 1784 | ), |
| 1784 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", | 1785 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99c..e79cb5c0655a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
| @@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
| 89 | 89 | ||
| 90 | cfg80211_lock_rdev(rdev); | 90 | cfg80211_lock_rdev(rdev); |
| 91 | mutex_lock(&rdev->devlist_mtx); | 91 | mutex_lock(&rdev->devlist_mtx); |
| 92 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 92 | wdev_lock(wdev); | 93 | wdev_lock(wdev); |
| 93 | 94 | ||
| 94 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 95 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
| @@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
| 135 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 136 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
| 136 | out: | 137 | out: |
| 137 | wdev_unlock(wdev); | 138 | wdev_unlock(wdev); |
| 139 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 138 | mutex_unlock(&rdev->devlist_mtx); | 140 | mutex_unlock(&rdev->devlist_mtx); |
| 139 | cfg80211_unlock_rdev(rdev); | 141 | cfg80211_unlock_rdev(rdev); |
| 140 | return err; | 142 | return err; |
| @@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
| 190 | 192 | ||
| 191 | cfg80211_lock_rdev(rdev); | 193 | cfg80211_lock_rdev(rdev); |
| 192 | mutex_lock(&rdev->devlist_mtx); | 194 | mutex_lock(&rdev->devlist_mtx); |
| 195 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 193 | wdev_lock(wdev); | 196 | wdev_lock(wdev); |
| 194 | 197 | ||
| 195 | err = 0; | 198 | err = 0; |
| @@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
| 223 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 226 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
| 224 | out: | 227 | out: |
| 225 | wdev_unlock(wdev); | 228 | wdev_unlock(wdev); |
| 229 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 226 | mutex_unlock(&rdev->devlist_mtx); | 230 | mutex_unlock(&rdev->devlist_mtx); |
| 227 | cfg80211_unlock_rdev(rdev); | 231 | cfg80211_unlock_rdev(rdev); |
| 228 | return err; | 232 | return err; |
| @@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
| 285 | 289 | ||
| 286 | cfg80211_lock_rdev(rdev); | 290 | cfg80211_lock_rdev(rdev); |
| 287 | mutex_lock(&rdev->devlist_mtx); | 291 | mutex_lock(&rdev->devlist_mtx); |
| 292 | mutex_lock(&rdev->sched_scan_mtx); | ||
| 288 | wdev_lock(wdev); | 293 | wdev_lock(wdev); |
| 289 | 294 | ||
| 290 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 295 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
| @@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
| 313 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 318 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
| 314 | out: | 319 | out: |
| 315 | wdev_unlock(wdev); | 320 | wdev_unlock(wdev); |
| 321 | mutex_unlock(&rdev->sched_scan_mtx); | ||
| 316 | mutex_unlock(&rdev->devlist_mtx); | 322 | mutex_unlock(&rdev->devlist_mtx); |
| 317 | cfg80211_unlock_rdev(rdev); | 323 | cfg80211_unlock_rdev(rdev); |
| 318 | return err; | 324 | return err; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e5..8dafe6d3c6e4 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
| @@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) | |||
| 334 | x->xflags &= ~XFRM_TIME_DEFER; | 334 | x->xflags &= ~XFRM_TIME_DEFER; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) | ||
| 338 | { | ||
| 339 | u32 seq_diff, oseq_diff; | ||
| 340 | struct km_event c; | ||
| 341 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | ||
| 342 | struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; | ||
| 343 | |||
| 344 | /* we send notify messages in case | ||
| 345 | * 1. we updated on of the sequence numbers, and the seqno difference | ||
| 346 | * is at least x->replay_maxdiff, in this case we also update the | ||
| 347 | * timeout of our timer function | ||
| 348 | * 2. if x->replay_maxage has elapsed since last update, | ||
| 349 | * and there were changes | ||
| 350 | * | ||
| 351 | * The state structure must be locked! | ||
| 352 | */ | ||
| 353 | |||
| 354 | switch (event) { | ||
| 355 | case XFRM_REPLAY_UPDATE: | ||
| 356 | if (!x->replay_maxdiff) | ||
| 357 | break; | ||
| 358 | |||
| 359 | if (replay_esn->seq_hi == preplay_esn->seq_hi) | ||
| 360 | seq_diff = replay_esn->seq - preplay_esn->seq; | ||
| 361 | else | ||
| 362 | seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; | ||
| 363 | |||
| 364 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) | ||
| 365 | oseq_diff = replay_esn->oseq - preplay_esn->oseq; | ||
| 366 | else | ||
| 367 | oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; | ||
| 368 | |||
| 369 | if (seq_diff < x->replay_maxdiff && | ||
| 370 | oseq_diff < x->replay_maxdiff) { | ||
| 371 | |||
| 372 | if (x->xflags & XFRM_TIME_DEFER) | ||
| 373 | event = XFRM_REPLAY_TIMEOUT; | ||
| 374 | else | ||
| 375 | return; | ||
| 376 | } | ||
| 377 | |||
| 378 | break; | ||
| 379 | |||
| 380 | case XFRM_REPLAY_TIMEOUT: | ||
| 381 | if (memcmp(x->replay_esn, x->preplay_esn, | ||
| 382 | xfrm_replay_state_esn_len(replay_esn)) == 0) { | ||
| 383 | x->xflags |= XFRM_TIME_DEFER; | ||
| 384 | return; | ||
| 385 | } | ||
| 386 | |||
| 387 | break; | ||
| 388 | } | ||
| 389 | |||
| 390 | memcpy(x->preplay_esn, x->replay_esn, | ||
| 391 | xfrm_replay_state_esn_len(replay_esn)); | ||
| 392 | c.event = XFRM_MSG_NEWAE; | ||
| 393 | c.data.aevent = event; | ||
| 394 | km_state_notify(x, &c); | ||
| 395 | |||
| 396 | if (x->replay_maxage && | ||
| 397 | !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) | ||
| 398 | x->xflags &= ~XFRM_TIME_DEFER; | ||
| 399 | } | ||
| 400 | |||
| 337 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) | 401 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) |
| 338 | { | 402 | { |
| 339 | int err = 0; | 403 | int err = 0; |
| @@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = { | |||
| 510 | .advance = xfrm_replay_advance_esn, | 574 | .advance = xfrm_replay_advance_esn, |
| 511 | .check = xfrm_replay_check_esn, | 575 | .check = xfrm_replay_check_esn, |
| 512 | .recheck = xfrm_replay_recheck_esn, | 576 | .recheck = xfrm_replay_recheck_esn, |
| 513 | .notify = xfrm_replay_notify_bmp, | 577 | .notify = xfrm_replay_notify_esn, |
| 514 | .overflow = xfrm_replay_overflow_esn, | 578 | .overflow = xfrm_replay_overflow_esn, |
| 515 | }; | 579 | }; |
| 516 | 580 | ||
