Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 5
-rw-r--r--  net/8021q/vlan_dev.c | 13
-rw-r--r--  net/appletalk/ddp.c | 3
-rw-r--r--  net/bluetooth/hci_conn.c | 19
-rw-r--r--  net/bluetooth/hci_event.c | 17
-rw-r--r--  net/bluetooth/l2cap_core.c | 8
-rw-r--r--  net/bluetooth/l2cap_sock.c | 5
-rw-r--r--  net/bluetooth/mgmt.c | 104
-rw-r--r--  net/bluetooth/smp.c | 69
-rw-r--r--  net/core/dev.c | 30
-rw-r--r--  net/core/dst.c | 16
-rw-r--r--  net/core/filter.c | 10
-rw-r--r--  net/core/iovec.c | 55
-rw-r--r--  net/core/neighbour.c | 9
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/ipv4/gre_demux.c | 1
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/igmp.c | 10
-rw-r--r--  net/ipv4/ip_tunnel.c | 26
-rw-r--r--  net/ipv4/route.c | 15
-rw-r--r--  net/ipv4/tcp.c | 3
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 10
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv4/udp.c | 5
-rw-r--r--  net/ipv6/mcast.c | 13
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/l2tp/l2tp_ppp.c | 4
-rw-r--r--  net/mac80211/util.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 20
-rw-r--r--  net/netfilter/nf_nat_core.c | 35
-rw-r--r--  net/netfilter/nf_tables_api.c | 11
-rw-r--r--  net/netfilter/nft_compat.c | 18
-rw-r--r--  net/netfilter/nft_nat.c | 14
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/openvswitch/actions.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 27
-rw-r--r--  net/openvswitch/flow.c | 4
-rw-r--r--  net/openvswitch/flow.h | 5
-rw-r--r--  net/openvswitch/flow_table.c | 16
-rw-r--r--  net/openvswitch/flow_table.h | 3
-rw-r--r--  net/openvswitch/vport-gre.c | 17
-rw-r--r--  net/sctp/sysctl.c | 46
-rw-r--r--  net/sctp/ulpevent.c | 122
-rw-r--r--  net/sunrpc/auth.c | 1
-rw-r--r--  net/tipc/bcast.c | 1
-rw-r--r--  net/tipc/msg.c | 11
-rw-r--r--  net/wireless/core.h | 2
-rw-r--r--  net/wireless/nl80211.c | 11
-rw-r--r--  net/wireless/reg.c | 22
51 files changed, 462 insertions(+), 405 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 9012b1c922b6..75d427763992 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-	if (skb_cow(skb, skb_headroom(skb)) < 0)
+	if (skb_cow(skb, skb_headroom(skb)) < 0) {
+		kfree_skb(skb);
 		return NULL;
+	}
+
 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;
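
Note: the vlan_core.c hunk above makes vlan_reorder_header() consume the skb when skb_cow() fails, so a NULL return now means "already freed" and the caller can neither leak nor double-free the buffer. A minimal sketch of that error-path ownership convention (hypothetical helper name, not code from this patch):

	#include <linux/skbuff.h>

	/* On failure, consume the skb and return NULL; the caller must
	 * treat NULL as "already freed" and never touch the skb again.
	 */
	static struct sk_buff *make_writable_or_free(struct sk_buff *skb)
	{
		if (skb_cow(skb, skb_headroom(skb)) < 0) {
			kfree_skb(skb);		/* consume on error */
			return NULL;
		}
		return skb;			/* caller keeps ownership */
	}
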
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c00398..dd11f612e03e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	int i;
 
-	free_percpu(vlan->vlan_pcpu_stats);
-	vlan->vlan_pcpu_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
+static void vlan_dev_free(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
+	free_netdev(dev);
+}
+
 void vlan_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
 	dev->tx_queue_len	= 0;
 
 	dev->netdev_ops		= &vlan_netdev_ops;
-	dev->destructor		= free_netdev;
+	dev->destructor		= vlan_dev_free;
 	dev->ethtool_ops	= &vlan_ethtool_ops;
 
 	memset(dev->broadcast, 0, ETH_ALEN);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082e02b3..bfcf6be1d665 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto drop;
 
 	/* Queue packet (standard) */
-	skb->sk = sock;
-
 	if (sock_queue_rcv_skb(sock, skb) < 0)
 		goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
 	if (!skb)
 		goto out;
 
-	skb->sk = sk;
 	skb_reserve(skb, ddp_dl->header_length);
 	skb_reserve(skb, dev->hard_header_len);
 	skb->dev = dev;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8671bc79a35b..a7a27bc2c0b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
 {
 	struct hci_conn *conn = container_of(work, struct hci_conn,
 					     disc_work.work);
+	int refcnt = atomic_read(&conn->refcnt);
 
 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 
-	if (atomic_read(&conn->refcnt))
+	WARN_ON(refcnt < 0);
+
+	/* FIXME: It was observed that in pairing failed scenario, refcnt
+	 * drops below 0. Probably this is because l2cap_conn_del calls
+	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
+	 * dropped. After that loop hci_chan_del is called which also drops
+	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
+	 * otherwise drop it.
+	 */
+	if (refcnt > 0)
 		return;
 
 	switch (conn->state) {
@@ -610,11 +620,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
 	if (hci_update_random_address(req, false, &own_addr_type))
 		return;
 
-	/* Save the address type used for this connnection attempt so we able
-	 * to retrieve this information if we need it.
-	 */
-	conn->src_type = own_addr_type;
-
 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
 	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
 	bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +899,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	/* If we're already encrypted set the REAUTH_PEND flag,
 	 * otherwise set the ENCRYPT_PEND.
 	 */
-	if (conn->key_type != 0xff)
+	if (conn->link_mode & HCI_LM_ENCRYPT)
 		set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 	else
 		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 21e5913d12e0..640c54ec1bd2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+	hci_dev_lock(hdev);
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	hci_dev_unlock(hdev);
+
 	hci_conn_check_pending(hdev);
 }
 
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		cp.authentication = conn->auth_type;
 
 		/* Request MITM protection if our IO caps allow it
-		 * except for the no-bonding case
+		 * except for the no-bonding case.
+		 * conn->auth_type is not updated here since
+		 * that might cause the user confirmation to be
+		 * rejected in case the remote doesn't have the
+		 * IO capabilities for MITM.
 		 */
 		if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
 		    cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
-		 * confirm_hint set to 1). */
-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+		 * confirm_hint set to 1). The exception is if neither
+		 * side had MITM in which case we do auto-accept.
+		 */
+		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+		    (loc_mitm || rem_mitm)) {
 			BT_DBG("Confirming auto-accept as acceptor");
 			confirm_hint = 1;
 			goto confirm;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6eabbe05fe54..323f23cd2c37 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree_skb(conn->rx_skb);
 
 	skb_queue_purge(&conn->pending_rx);
-	flush_work(&conn->pending_rx_work);
+
+	/* We can not call flush_work(&conn->pending_rx_work) here since we
+	 * might block if we are running on a worker from the same workqueue
+	 * pending_rx_work is waiting on.
+	 */
+	if (work_pending(&conn->pending_rx_work))
+		cancel_work_sync(&conn->pending_rx_work);
 
 	l2cap_unregister_all_users(conn);
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ade3fb4c23bc..e1378693cc90 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
 		/*change security for LE channels */
 		if (chan->scid == L2CAP_CID_ATT) {
-			if (!conn->hcon->out) {
-				err = -EINVAL;
-				break;
-			}
-
 			if (smp_conn_security(conn->hcon, sec.level))
 				break;
 			sk->sk_state = BT_CONFIG;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 0fce54412ffd..af8e0a6243b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
 	}
 }
 
+static void hci_stop_discovery(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_cp_remote_name_req_cancel cp;
+	struct inquiry_entry *e;
+
+	switch (hdev->discovery.state) {
+	case DISCOVERY_FINDING:
+		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+		} else {
+			cancel_delayed_work(&hdev->le_scan_disable);
+			hci_req_add_le_scan_disable(req);
+		}
+
+		break;
+
+	case DISCOVERY_RESOLVING:
+		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+						     NAME_PENDING);
+		if (!e)
+			return;
+
+		bacpy(&cp.bdaddr, &e->data.bdaddr);
+		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+			    &cp);
+
+		break;
+
+	default:
+		/* Passive scanning */
+		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+			hci_req_add_le_scan_disable(req);
+		break;
+	}
+}
+
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
 	struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
 		disable_advertising(&req);
 
-	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
-		hci_req_add_le_scan_disable(&req);
-	}
+	hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
 		struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP */
+		/* Continue with pairing via SMP. The hdev lock must be
+		 * released as SMP may try to recquire it for crypto
+		 * purposes.
+		 */
+		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+		hci_dev_lock(hdev);
 
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 {
 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
 	struct pending_cmd *cmd;
-	struct hci_cp_remote_name_req_cancel cp;
-	struct inquiry_entry *e;
 	struct hci_request req;
 	int err;
 
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
 	hci_req_init(&req, hdev);
 
-	switch (hdev->discovery.state) {
-	case DISCOVERY_FINDING:
-		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-		} else {
-			cancel_delayed_work(&hdev->le_scan_disable);
-
-			hci_req_add_le_scan_disable(&req);
-		}
-
-		break;
+	hci_stop_discovery(&req);
 
-	case DISCOVERY_RESOLVING:
-		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
-		if (!e) {
-			mgmt_pending_remove(cmd);
-			err = cmd_complete(sk, hdev->id,
-					   MGMT_OP_STOP_DISCOVERY, 0,
-					   &mgmt_cp->type,
-					   sizeof(mgmt_cp->type));
-			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-			goto unlock;
-		}
-
-		bacpy(&cp.bdaddr, &e->data.bdaddr);
-		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
-			    &cp);
-
-		break;
-
-	default:
-		BT_DBG("unknown discovery state %u", hdev->discovery.state);
-
-		mgmt_pending_remove(cmd);
-		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-				   MGMT_STATUS_FAILED, &mgmt_cp->type,
-				   sizeof(mgmt_cp->type));
+	err = hci_req_run(&req, stop_discovery_complete);
+	if (!err) {
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
 		goto unlock;
 	}
 
-	err = hci_req_run(&req, stop_discovery_complete);
-	if (err < 0)
-		mgmt_pending_remove(cmd);
-	else
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+	mgmt_pending_remove(cmd);
+
+	/* If no HCI commands were sent we're done */
+	if (err == -ENODATA) {
+		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+				   &mgmt_cp->type, sizeof(mgmt_cp->type));
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	}
 
 unlock:
 	hci_dev_unlock(hdev);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d1cc164557d..e33a982161c1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
 	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
 };
 
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+	/* If either side has unknown io_caps, use JUST WORKS */
+	if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+	    remote_io > SMP_IO_KEYBOARD_DISPLAY)
+		return JUST_WORKS;
+
+	return gen_method[remote_io][local_io];
+}
+
 static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
 						u8 local_io, u8 remote_io)
 {
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
 	BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
 
 	/* If neither side wants MITM, use JUST WORKS */
-	/* If either side has unknown io_caps, use JUST WORKS */
 	/* Otherwise, look up method from the table */
-	if (!(auth & SMP_AUTH_MITM) ||
-	    local_io > SMP_IO_KEYBOARD_DISPLAY ||
-	    remote_io > SMP_IO_KEYBOARD_DISPLAY)
+	if (!(auth & SMP_AUTH_MITM))
 		method = JUST_WORKS;
 	else
-		method = gen_method[remote_io][local_io];
+		method = get_auth_method(smp, local_io, remote_io);
 
 	/* If not bonding, don't ask user to confirm a Zero TK */
 	if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -544,7 +551,7 @@ static u8 smp_random(struct smp_chan *smp)
 		hci_le_start_enc(hcon, ediv, rand, stk);
 		hcon->enc_key_size = smp->enc_key_size;
 	} else {
-		u8 stk[16];
+		u8 stk[16], auth;
 		__le64 rand = 0;
 		__le16 ediv = 0;
 
@@ -556,8 +563,13 @@ static u8 smp_random(struct smp_chan *smp)
 		memset(stk + smp->enc_key_size, 0,
 		       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
+		if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+			auth = 1;
+		else
+			auth = 0;
+
 		hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-			    HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
+			    HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
 			    ediv, rand);
 	}
 
@@ -664,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_pairing rsp, *req = (void *) skb->data;
 	struct smp_chan *smp;
-	u8 key_size, auth;
+	u8 key_size, auth, sec_level;
 	int ret;
 
 	BT_DBG("conn %p", conn);
@@ -690,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 	/* We didn't start the pairing, so match remote */
 	auth = req->auth_req;
 
-	conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+	sec_level = authreq_to_seclevel(auth);
+	if (sec_level > conn->hcon->pending_sec_level)
+		conn->hcon->pending_sec_level = sec_level;
+
+	/* If we need MITM check that it can be acheived */
+	if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+		u8 method;
+
+		method = get_auth_method(smp, conn->hcon->io_capability,
+					 req->io_capability);
+		if (method == JUST_WORKS || method == JUST_CFM)
+			return SMP_AUTH_REQUIREMENTS;
+	}
 
 	build_pairing_cmd(conn, req, &rsp, auth);
 
@@ -738,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (check_enc_key_size(conn, key_size))
 		return SMP_ENC_KEY_SIZE;
 
+	/* If we need MITM check that it can be acheived */
+	if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+		u8 method;
+
+		method = get_auth_method(smp, req->io_capability,
+					 rsp->io_capability);
+		if (method == JUST_WORKS || method == JUST_CFM)
+			return SMP_AUTH_REQUIREMENTS;
+	}
+
 	get_random_bytes(smp->prnd, sizeof(smp->prnd));
 
 	smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -833,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 	struct smp_cmd_pairing cp;
 	struct hci_conn *hcon = conn->hcon;
 	struct smp_chan *smp;
+	u8 sec_level;
 
 	BT_DBG("conn %p", conn);
 
@@ -842,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (!(conn->hcon->link_mode & HCI_LM_MASTER))
 		return SMP_CMD_NOTSUPP;
 
-	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+	sec_level = authreq_to_seclevel(rp->auth_req);
+	if (sec_level > hcon->pending_sec_level)
+		hcon->pending_sec_level = sec_level;
 
 	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
 		return 0;
@@ -896,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 	if (smp_sufficient_security(hcon, sec_level))
 		return 1;
 
+	if (sec_level > hcon->pending_sec_level)
+		hcon->pending_sec_level = sec_level;
+
 	if (hcon->link_mode & HCI_LM_MASTER)
-		if (smp_ltk_encrypt(conn, sec_level))
-			goto done;
+		if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+			return 0;
 
 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
 		return 0;
@@ -913,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 	 * requires it.
 	 */
 	if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
-	    sec_level > BT_SECURITY_MEDIUM)
+	    hcon->pending_sec_level > BT_SECURITY_MEDIUM)
 		authreq |= SMP_AUTH_MITM;
 
 	if (hcon->link_mode & HCI_LM_MASTER) {
@@ -932,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
 	set_bit(SMP_FLAG_INITIATOR, &smp->flags);
 
-done:
-	hcon->pending_sec_level = sec_level;
-
 	return 0;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..7990984ca364 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+					 struct net_device *dev,
+					 struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+		struct netdev_notifier_change_info change_info;
+
+		change_info.flags_changed = 0;
+		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+					      &change_info.info);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 	}
 }
@@ -4227,9 +4234,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
 	napi->weight = weight_p;
 	local_irq_disable();
-	while (work < quota) {
+	while (1) {
 		struct sk_buff *skb;
-		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			local_irq_enable();
@@ -4243,24 +4249,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		}
 
 		rps_lock(sd);
-		qlen = skb_queue_len(&sd->input_pkt_queue);
-		if (qlen)
-			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-						   &sd->process_queue);
-
-		if (qlen < quota - work) {
+		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
 			 * only current cpu owns and manipulates this napi,
-			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-			 * we can use a plain write instead of clear_bit(),
+			 * and NAPI_STATE_SCHED is the only possible flag set
+			 * on backlog.
+			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
			list_del(&napi->poll_list);
 			napi->state = 0;
+			rps_unlock(sd);
 
-			quota = work + qlen;
+			break;
 		}
+
+		skb_queue_splice_tail_init(&sd->input_pkt_queue,
+					   &sd->process_queue);
 		rps_unlock(sd);
 	}
 	local_irq_enable();
diff --git a/net/core/dst.c b/net/core/dst.c
index 80d6286c8b62..a028409ee438 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+	dst = dst_destroy(dst);
+	if (dst)
+		__dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
 	if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
 		newrefcnt = atomic_dec_return(&dst->__refcnt);
 		WARN_ON(newrefcnt < 0);
-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-			dst = dst_destroy(dst);
-			if (dst)
-				__dst_free(dst);
-		}
+		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+			call_rcu(&dst->rcu_head, dst_destroy_rcu);
 	}
 }
 EXPORT_SYMBOL(dst_release);
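
Note: the dst.c hunk above stops destroying a DST_NOCACHE dst synchronously in dst_release() and defers dst_destroy() through call_rcu(), since lockless readers that found the entry via rcu_dereference() may still be using it until a grace period elapses. A generic sketch of that RCU deferred-free pattern (hypothetical names, not code from this patch):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cache_obj {
		struct rcu_head rcu;
		int payload;
	};

	static void cache_obj_free_rcu(struct rcu_head *head)
	{
		/* Runs only after all pre-existing RCU readers finished */
		kfree(container_of(head, struct cache_obj, rcu));
	}

	static void cache_obj_release(struct cache_obj *obj)
	{
		/* Never kfree() directly: a reader may still hold obj */
		call_rcu(&obj->rcu, cache_obj_free_rcu);
	}
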
diff --git a/net/core/filter.c b/net/core/filter.c
index 735fad897496..1dbf6462f766 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
 	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-	if (len <= 0 || len >= BPF_MAXINSNS)
+	if (len <= 0 || len > BPF_MAXINSNS)
 		return -EINVAL;
 
 	if (new_prog) {
-		addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+		addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
 		if (!addrs)
 			return -ENOMEM;
 	}
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 
 	BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return -ENOMEM;
 
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
 	if (fp_new) {
 		*fp_new = *fp;
-		/* As we're kepping orig_prog in fp_new along,
+		/* As we're keeping orig_prog in fp_new along,
 		 * we need to make sure we're not evicting it
 		 * from the old fp.
 		 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 
 /**
  *	sk_unattached_filter_create - create an unattached filter
- *	@fprog: the filter program
  *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
  *
  * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 }
 
 /*
- *	Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-		      int offset, int len)
-{
-	int copy;
-	for (; len > 0; ++iov) {
-		/* Skip over the finished iovecs */
-		if (unlikely(offset >= iov->iov_len)) {
-			offset -= iov->iov_len;
-			continue;
-		}
-		copy = min_t(unsigned int, iov->iov_len - offset, len);
-		if (copy_to_user(iov->iov_base + offset, kdata, copy))
-			return -EFAULT;
-		offset = 0;
-		kdata += copy;
-		len -= copy;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *	Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-			int offset, int len)
-{
-	/* Skip over the finished iovecs */
-	while (offset >= iov->iov_len) {
-		offset -= iov->iov_len;
-		iov++;
-	}
-
-	while (len > 0) {
-		u8 __user *base = iov->iov_base + offset;
-		int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-		offset = 0;
-		if (copy_from_user(kdata, base, copy))
-			return -EFAULT;
-		len -= copy;
-		kdata += copy;
-		iov++;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
-/*
  *	And now for the all-in-one: copy and checksum from a user iovec
  *	directly to a datagram
  *	Calls to csum_partial but the last must be in 32 bit chunks
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..559890b0f0a2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
+		struct neigh_table *tbl = p->tbl;
 		dev_name_source = "default";
-		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
 	}
 
 	if (handler) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9cd5344fad73..c1a33033cbe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 						 skb_put(nskb, len),
 						 len, 0);
 			SKB_GSO_CB(nskb)->csum_start =
-				skb_headroom(nskb) + offset;
+				skb_headroom(nskb) + doffset;
 			continue;
 		}
 
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 
 	skb_push(skb, hdr_len);
 
+	skb_reset_transport_header(skb);
 	greh = (struct gre_base_hdr *)skb->data;
 	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
 	greh->protocol = tpi->proto;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
 			/* fall through */
 		case 0:
 			info = ntohs(icmph->un.frag.mtu);
-			if (!info)
-				goto out;
 		}
 		break;
 	case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 	rtnl_lock();
 	in_dev = ip_mc_find_dev(net, imr);
+	if (!in_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
 	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list;
 	     (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
 		*imlp = iml->next_rcu;
 
-		if (in_dev)
-			ip_mc_dec_group(in_dev, group);
+		ip_mc_dec_group(in_dev, group);
 		rtnl_unlock();
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
 		kfree_rcu(iml, rcu);
 		return 0;
 	}
-	if (!in_dev)
-		ret = -ENODEV;
+out:
 	rtnl_unlock();
 	return ret;
 }
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 097b3e7c1e8f..6f9de61dce5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
 	struct dst_entry *old_dst;
 
-	if (dst) {
-		if (dst->flags & DST_NOCACHE)
-			dst = NULL;
-		else
-			dst_clone(dst);
-	}
+	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
 	if (dst) {
 		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-			rcu_read_unlock();
 			tunnel_dst_reset(t);
-			return NULL;
+			dst_release(dst);
+			dst = NULL;
 		}
-		dst_hold(dst);
 	}
 	rcu_read_unlock();
 	return (struct rtable *)dst;
@@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
 		if (remote != t->parms.iph.daddr ||
+		    t->parms.iph.saddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
@@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 	head = &itn->tunnels[hash];
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if ((local != t->parms.iph.saddr &&
-		     (local != t->parms.iph.daddr ||
-		      !ipv4_is_multicast(local))) ||
-		    !(t->dev->flags & IFF_UP))
+		if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+		    (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+			continue;
+
+		if (!(t->dev->flags & IFF_UP))
 			continue;
 
 		if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
 	hlist_for_each_entry_rcu(t, head, hash_node) {
 		if (t->parms.i_key != key ||
+		    t->parms.iph.saddr != 0 ||
+		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
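
Note: the tunnel_rtable_get() hunk above pairs with the dst.c change: the per-cpu cache now holds a real reference, and a reader may only use the cached dst if atomic_inc_not_zero() succeeds, which guarantees it never revives an entry whose refcount already dropped to zero. A sketch of that RCU lookup-then-refcount pattern (hypothetical names, not code from this patch):

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>

	static struct dst_entry *cached_dst_get(struct dst_entry __rcu **slot)
	{
		struct dst_entry *dst;

		rcu_read_lock();
		dst = rcu_dereference(*slot);
		/* Only take the object if its refcount is still non-zero */
		if (dst && !atomic_inc_not_zero(&dst->__refcnt))
			dst = NULL;
		rcu_read_unlock();
		return dst;	/* caller owns one reference, or NULL */
	}
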
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..3162ea923ded 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
 	struct rtable *rt;
-	struct dst_entry *dst;
+	struct dst_entry *odst = NULL;
 	bool new = false;
 
 	bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	if (!ip_sk_accept_pmtu(sk))
 		goto out;
 
-	rt = (struct rtable *) __sk_dst_get(sk);
+	odst = sk_dst_get(sk);
 
-	if (sock_owned_by_user(sk) || !rt) {
+	if (sock_owned_by_user(sk) || !odst) {
 		__ipv4_sk_update_pmtu(skb, sk, mtu);
 		goto out;
 	}
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-	if (!__sk_dst_check(sk, 0)) {
+	rt = (struct rtable *)odst;
+	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 		if (IS_ERR(rt))
 			goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-	dst = dst_check(&rt->dst, 0);
-	if (!dst) {
+	if (!dst_check(&rt->dst, 0)) {
 		if (new)
 			dst_release(&rt->dst);
 
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	}
 
 	if (new)
-		__sk_dst_set(sk, &rt->dst);
+		sk_dst_set(sk, &rt->dst);
 
 out:
 	bh_unlock_sock(sk);
+	dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if (unlikely(tp->repair)) {
 		if (tp->repair_queue == TCP_RECV_QUEUE) {
 			copied = tcp_send_rcvq(sk, msg, size);
-			goto out;
+			goto out_nopush;
 		}
 
 		err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
 out:
 	if (copied)
 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
 	release_sock(sk);
 	return copied + copied_syn;
 
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 62e48cf84e60..9771563ab564 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 					      struct dst_entry *dst,
 					      struct request_sock *req)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp;
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40661fc1e233..40639c288dc2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	}
 
 	/* D-SACK for already forgotten data... Do dumb counting. */
-	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
 	    !after(end_seq_0, prior_snd_una) &&
 	    after(end_seq_0, tp->undo_marker))
 		tp->undo_retrans--;
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 			unsigned int new_len = (pkt_len / mss) * mss;
 			if (!in_sack && new_len < pkt_len) {
 				new_len += mss;
-				if (new_len > skb->len)
+				if (new_len >= skb->len)
 					return 0;
 			}
 			pkt_len = new_len;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
-		if (tp->undo_marker && tp->undo_retrans &&
+		if (tp->undo_marker && tp->undo_retrans > 0 &&
 		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
 	tp->lost_out = 0;
 
 	tp->undo_marker = 0;
-	tp->undo_retrans = 0;
+	tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
 	tp->prior_ssthresh = 0;
 	tp->undo_marker = tp->snd_una;
-	tp->undo_retrans = tp->retrans_out;
+	tp->undo_retrans = tp->retrans_out ? : -1;
 
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
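
Note: across the tcp_input.c hunks above (and the tcp_output.c hunk below), undo_retrans gains -1 as a "not armed" value: tcp_clear_retrans_partial() resets it to -1, tcp_enter_recovery() uses `retrans_out ? : -1`, and the D-SACK paths only decrement when the counter is > 0, so a counter that legitimately reached 0 is no longer confused with one that was never initialized. A standalone sketch of the sentinel idea (hypothetical names, not kernel code):

	static int undo_retrans = -1;	/* -1: not armed; >= 0: live count */

	static void account_retransmit(int pcount)
	{
		if (undo_retrans < 0)	/* first retransmit arms the counter */
			undo_retrans = 0;
		undo_retrans += pcount;
	}

	static void on_dsack_for_retransmit(void)
	{
		if (undo_retrans > 0)	/* never decrement an unarmed counter */
			undo_retrans--;
	}
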
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0ea24e..179b51e6bda3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-		tp->undo_retrans += tcp_skb_pcount(skb);
-
 		/* snd_nxt is stored to detect loss of retransmitted segment,
 		 * see tcp_input.c tcp_sacktag_write_queue().
 		 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	} else if (err != -EBUSY) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
+
+	if (tp->undo_retrans < 0)
+		tp->undo_retrans = 0;
+	tp->undo_retrans += tcp_skb_pcount(skb);
 	return err;
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b7e402..7d5a8661df76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			goto csum_error;
 
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+				 is_udplite);
 		goto drop;
+	}
 
 	rc = 0;
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
 	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
 	len -= skb_network_header_len(skb);
 
-	/* Drop queries with not link local source */
-	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+	/* RFC3810 6.2
+	 * Upon reception of an MLD message that contains a Query, the node
+	 * checks if the source address of the message is a valid link-local
+	 * address, if the Hop Limit is set to 1, and if the Router Alert
+	 * option is present in the Hop-By-Hop Options header of the IPv6
+	 * packet. If any of these checks fails, the packet is dropped.
+	 */
+	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+	    ipv6_hdr(skb)->hop_limit != 1 ||
+	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
 		return -EINVAL;
 
 	idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c834799288..7092ff78fd84 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			goto csum_error;
 	}
 
-	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+		UDP6_INC_STATS_BH(sock_net(sk),
+				  UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
+	}
 
 	skb_dst_drop(skb);
 
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_unlock_sock(sk);
 
 	return rc;
+
 csum_error:
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
 	int err;
 
 	if (level != SOL_PPPOL2TP)
-		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+		return -EINVAL;
 
 	if (optlen < sizeof(int))
 		return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 	struct pppol2tp_session *ps;
 
 	if (level != SOL_PPPOL2TP)
-		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+		return -EINVAL;
 
 	if (get_user(len, optlen))
 		return -EFAULT;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 	int err;
 
 	/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+			    24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
 	if (!skb)
 		return;
 
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
 
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
 	memset(mgmt, 0, 24 + 6);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c42e83d2751c..581a6584ed0c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	ip_vs_trash_cleanup(net);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 58579634427d..300ed1eec729 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -597,6 +597,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
597#ifdef CONFIG_NF_CONNTRACK_MARK 597#ifdef CONFIG_NF_CONNTRACK_MARK
598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
599#endif 599#endif
600#ifdef CONFIG_NF_CONNTRACK_ZONES
601 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
602#endif
600 + ctnetlink_proto_size(ct) 603 + ctnetlink_proto_size(ct)
601 + ctnetlink_label_size(ct) 604 + ctnetlink_label_size(ct)
602 ; 605 ;
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
1150static int 1153static int
1151ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying) 1154ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1152{ 1155{
1153 struct nf_conn *ct, *last = NULL; 1156 struct nf_conn *ct, *last;
1154 struct nf_conntrack_tuple_hash *h; 1157 struct nf_conntrack_tuple_hash *h;
1155 struct hlist_nulls_node *n; 1158 struct hlist_nulls_node *n;
1156 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1159 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1163 if (cb->args[2]) 1166 if (cb->args[2])
1164 return 0; 1167 return 0;
1165 1168
1166 if (cb->args[0] == nr_cpu_ids) 1169 last = (struct nf_conn *)cb->args[1];
1167 return 0;
1168 1170
1169 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 1171 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1170 struct ct_pcpu *pcpu; 1172 struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1174 1176
1175 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); 1177 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1176 spin_lock_bh(&pcpu->lock); 1178 spin_lock_bh(&pcpu->lock);
1177 last = (struct nf_conn *)cb->args[1];
1178 list = dying ? &pcpu->dying : &pcpu->unconfirmed; 1179 list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1179restart: 1180restart:
1180 hlist_nulls_for_each_entry(h, n, list, hnnode) { 1181 hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
1193 ct); 1194 ct);
1194 rcu_read_unlock(); 1195 rcu_read_unlock();
1195 if (res < 0) { 1196 if (res < 0) {
1196 nf_conntrack_get(&ct->ct_general); 1197 if (!atomic_inc_not_zero(&ct->ct_general.use))
1198 continue;
1199 cb->args[0] = cpu;
1197 cb->args[1] = (unsigned long)ct; 1200 cb->args[1] = (unsigned long)ct;
1198 spin_unlock_bh(&pcpu->lock); 1201 spin_unlock_bh(&pcpu->lock);
1199 goto out; 1202 goto out;
@@ -1202,10 +1205,10 @@ restart:
1202 if (cb->args[1]) { 1205 if (cb->args[1]) {
1203 cb->args[1] = 0; 1206 cb->args[1] = 0;
1204 goto restart; 1207 goto restart;
1205 } else 1208 }
1206 cb->args[2] = 1;
1207 spin_unlock_bh(&pcpu->lock); 1209 spin_unlock_bh(&pcpu->lock);
1208 } 1210 }
1211 cb->args[2] = 1;
1209out: 1212out:
1210 if (last) 1213 if (last)
1211 nf_ct_put(last); 1214 nf_ct_put(last);
@@ -2040,6 +2043,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2040#ifdef CONFIG_NF_CONNTRACK_MARK 2043#ifdef CONFIG_NF_CONNTRACK_MARK
2041 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2044 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2042#endif 2045#endif
2046#ifdef CONFIG_NF_CONNTRACK_ZONES
2047 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
2048#endif
2043 + ctnetlink_proto_size(ct) 2049 + ctnetlink_proto_size(ct)
2044 ; 2050 ;
2045} 2051}
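
The ctnetlink_dump_list() hunk above swaps an unconditional nf_conntrack_get() for atomic_inc_not_zero(): a conntrack whose refcount has already reached zero is skipped instead of having its reference count resurrected, and the resume cursor (cb->args[1]) is only ever set to a safely pinned entry. A minimal userspace sketch of that pattern, assuming a refcounted entry type (entry and save_cursor are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stddef.h>

    struct entry {
            atomic_int use;                 /* reference count */
    };

    /* Take a reference only if the count is still non-zero; return
     * NULL for a dying entry.  This mirrors atomic_inc_not_zero()
     * on ct->ct_general.use in the hunk above. */
    static struct entry *save_cursor(struct entry *e)
    {
            int old = atomic_load(&e->use);

            while (old != 0)
                    if (atomic_compare_exchange_weak(&e->use, &old, old + 1))
                            return e;       /* pinned: safe resume point */
            return NULL;                    /* already dying: skip it */
    }
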
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 09096a670c45..a49907b1dabc 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
525 return i->status & IPS_NAT_MASK ? 1 : 0; 525 return i->status & IPS_NAT_MASK ? 1 : 0;
526} 526}
527 527
528static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
529{
530 struct nf_conn_nat *nat = nfct_nat(ct);
531
532 if (nf_nat_proto_remove(ct, data))
533 return 1;
534
535 if (!nat || !nat->ct)
536 return 0;
537
538 /* This netns is being destroyed, and conntrack has nat null binding.
539 * Remove it from bysource hash, as the table will be freed soon.
540 *
541 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
542 * will delete entry from already-freed table.
543 */
544 if (!del_timer(&ct->timeout))
545 return 1;
546
547 spin_lock_bh(&nf_nat_lock);
548 hlist_del_rcu(&nat->bysource);
549 ct->status &= ~IPS_NAT_DONE_MASK;
550 nat->ct = NULL;
551 spin_unlock_bh(&nf_nat_lock);
552
553 add_timer(&ct->timeout);
554
555 /* don't delete conntrack. Although that would make things a lot
556 * simpler, we'd end up flushing all conntracks on nat rmmod.
557 */
558 return 0;
559}
560
528static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) 561static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
529{ 562{
530 struct nf_nat_proto_clean clean = { 563 struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
795{ 828{
796 struct nf_nat_proto_clean clean = {}; 829 struct nf_nat_proto_clean clean = {};
797 830
798 nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0); 831 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
799 synchronize_rcu(); 832 synchronize_rcu();
800 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); 833 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
801} 834}
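
The new nf_nat_proto_clean() above follows the nf_ct_iterate_cleanup() callback contract: returning 1 asks the walker to remove the entry, returning 0 keeps it. The fix keeps conntracks that only carry a NAT null binding but detaches them from the bysource hash first, since that table is about to be freed with the netns (the del_timer()/add_timer() pair pins the entry against concurrent destruction). A simplified model of the contract, with hypothetical field names:

    #include <stdbool.h>

    struct conn {
            bool nat_binding;       /* stands in for IPS_NAT_MASK */
            bool hashed;            /* linked in the bysource table */
    };

    /* Return 1 and the walker deletes the entry; return 0 and it is
     * kept.  NULL-binding entries are kept but unhashed so nothing
     * points into the soon-to-be-freed table. */
    static int proto_clean(struct conn *ct)
    {
            if (ct->nat_binding)
                    return 1;               /* real NAT entry: delete */
            if (ct->hashed)
                    ct->hashed = false;     /* detach from dying table */
            return 0;                       /* keep the conntrack itself */
    }
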
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 624e083125b9..ab4566cfcbe4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE) 1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
1731 return -EINVAL; 1731 return -EINVAL;
1732 handle = nf_tables_alloc_handle(table); 1732 handle = nf_tables_alloc_handle(table);
1733
1734 if (chain->use == UINT_MAX)
1735 return -EOVERFLOW;
1733 } 1736 }
1734 1737
1735 if (nla[NFTA_RULE_POSITION]) { 1738 if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1789 1792
1790 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 1793 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
1791 if (nft_rule_is_active_next(net, old_rule)) { 1794 if (nft_rule_is_active_next(net, old_rule)) {
1792 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, 1795 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
1793 old_rule); 1796 old_rule);
1794 if (trans == NULL) { 1797 if (trans == NULL) {
1795 err = -ENOMEM; 1798 err = -ENOMEM;
1796 goto err2; 1799 goto err2;
1797 } 1800 }
1798 nft_rule_disactivate_next(net, old_rule); 1801 nft_rule_disactivate_next(net, old_rule);
1799 list_add_tail(&rule->list, &old_rule->list); 1802 chain->use--;
1803 list_add_tail_rcu(&rule->list, &old_rule->list);
1800 } else { 1804 } else {
1801 err = -ENOENT; 1805 err = -ENOENT;
1802 goto err2; 1806 goto err2;
@@ -1826,6 +1830,7 @@ err3:
1826 list_del_rcu(&nft_trans_rule(trans)->list); 1830 list_del_rcu(&nft_trans_rule(trans)->list);
1827 nft_rule_clear(net, nft_trans_rule(trans)); 1831 nft_rule_clear(net, nft_trans_rule(trans));
1828 nft_trans_destroy(trans); 1832 nft_trans_destroy(trans);
1833 chain->use++;
1829 } 1834 }
1830err2: 1835err2:
1831 nf_tables_rule_destroy(&ctx, rule); 1836 nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
2845 goto nla_put_failure; 2850 goto nla_put_failure;
2846 2851
2847 nfmsg = nlmsg_data(nlh); 2852 nfmsg = nlmsg_data(nlh);
2848 nfmsg->nfgen_family = NFPROTO_UNSPEC; 2853 nfmsg->nfgen_family = ctx.afi->family;
2849 nfmsg->version = NFNETLINK_V0; 2854 nfmsg->version = NFNETLINK_V0;
2850 nfmsg->res_id = 0; 2855 nfmsg->res_id = 0;
2851 2856
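
Two of the nf_tables_api.c hunks above balance the per-chain rule counter: the UINT_MAX check refuses a new rule once chain->use would saturate, and the err3 path re-increments what the replace path decremented. A sketch of the guard-before-increment idiom (chain_use_get is an illustrative name):

    #include <errno.h>
    #include <limits.h>

    /* Refuse another reference once the counter is saturated, so it
     * can never wrap back to zero under a long-lived chain. */
    static int chain_use_get(unsigned int *use)
    {
            if (*use == UINT_MAX)
                    return -EOVERFLOW;
            (*use)++;
            return 0;
    }
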
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8a779be832fb..1840989092ed 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -195,6 +195,15 @@ static void
195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
196{ 196{
197 struct xt_target *target = expr->ops->data; 197 struct xt_target *target = expr->ops->data;
198 void *info = nft_expr_priv(expr);
199 struct xt_tgdtor_param par;
200
201 par.net = ctx->net;
202 par.target = target;
203 par.targinfo = info;
204 par.family = ctx->afi->family;
205 if (par.target->destroy != NULL)
206 par.target->destroy(&par);
198 207
199 module_put(target->me); 208 module_put(target->me);
200} 209}
@@ -382,6 +391,15 @@ static void
382nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 391nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
383{ 392{
384 struct xt_match *match = expr->ops->data; 393 struct xt_match *match = expr->ops->data;
394 void *info = nft_expr_priv(expr);
395 struct xt_mtdtor_param par;
396
397 par.net = ctx->net;
398 par.match = match;
399 par.matchinfo = info;
400 par.family = ctx->afi->family;
401 if (par.match->destroy != NULL)
402 par.match->destroy(&par);
385 403
386 module_put(match->me); 404 module_put(match->me);
387} 405}
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0195d28bcfc..79ff58cd36dc 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
175 if (nla_put_be32(skb, 175 if (nla_put_be32(skb,
176 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) 176 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
177 goto nla_put_failure; 177 goto nla_put_failure;
178 if (nla_put_be32(skb, 178 if (priv->sreg_proto_min) {
179 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min))) 179 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
180 goto nla_put_failure; 180 htonl(priv->sreg_proto_min)))
181 if (nla_put_be32(skb, 181 goto nla_put_failure;
182 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max))) 182 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
183 goto nla_put_failure; 183 htonl(priv->sreg_proto_max)))
184 goto nla_put_failure;
185 }
184 return 0; 186 return 0;
185 187
186nla_put_failure: 188nla_put_failure:
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
636 while (nlk->cb_running && netlink_dump_space(nlk)) { 636 while (nlk->cb_running && netlink_dump_space(nlk)) {
637 err = netlink_dump(sk); 637 err = netlink_dump(sk);
638 if (err < 0) { 638 if (err < 0) {
639 sk->sk_err = err; 639 sk->sk_err = -err;
640 sk->sk_error_report(sk); 640 sk->sk_error_report(sk);
641 break; 641 break;
642 } 642 }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2484 ret = netlink_dump(sk); 2484 ret = netlink_dump(sk);
2485 if (ret) { 2485 if (ret) {
2486 sk->sk_err = ret; 2486 sk->sk_err = -ret;
2487 sk->sk_error_report(sk); 2487 sk->sk_error_report(sk);
2488 } 2488 }
2489 } 2489 }
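
Both af_netlink.c hunks flip a sign: in-kernel calls return negative errno values, while sk->sk_err stores the positive errno that userspace later reads back (for instance through SO_ERROR). A standalone model of the convention:

    struct sock_model {
            int sk_err;         /* positive errno, as userspace expects */
    };

    /* Store a negative-errno return value with the sign corrected. */
    static void sk_set_err(struct sock_model *sk, int ret)
    {
            sk->sk_err = ret < 0 ? -ret : ret;
    }
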
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
551 551
552 case OVS_ACTION_ATTR_SAMPLE: 552 case OVS_ACTION_ATTR_SAMPLE:
553 err = sample(dp, skb, a); 553 err = sample(dp, skb, a);
554 if (unlikely(err)) /* skb already freed. */
555 return err;
554 break; 556 break;
555 } 557 }
556 558
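
The OVS_ACTION_ATTR_SAMPLE fix encodes an ownership rule: when sample() fails it has already freed the skb, so do_execute_actions() must return at once rather than fall through to code that would touch freed memory. A compact model with hypothetical helpers (do_sample, next_action):

    struct buf;

    int do_sample(struct buf *b);   /* frees b on failure */
    int next_action(struct buf *b);

    /* Bail out as soon as a callee may have consumed the buffer. */
    static int run_actions(struct buf *b)
    {
            int err = do_sample(b);

            if (err)
                    return err;     /* b already freed by do_sample() */
            return next_action(b);
    }
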
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
276 OVS_CB(skb)->flow = flow; 276 OVS_CB(skb)->flow = flow;
277 OVS_CB(skb)->pkt_key = &key; 277 OVS_CB(skb)->pkt_key = &key;
278 278
279 ovs_flow_stats_update(OVS_CB(skb)->flow, skb); 279 ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
280 ovs_execute_actions(dp, skb); 280 ovs_execute_actions(dp, skb);
281 stats_counter = &stats->n_hit; 281 stats_counter = &stats->n_hit;
282 282
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
889 } 889 }
890 /* The unmasked key has to be the same for flow updates. */ 890 /* The unmasked key has to be the same for flow updates. */
891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { 891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST; 892 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
893 goto err_unlock_ovs; 893 if (!flow) {
894 error = -ENOENT;
895 goto err_unlock_ovs;
896 }
894 } 897 }
895 /* Update actions. */ 898 /* Update actions. */
896 old_acts = ovsl_dereference(flow->sf_acts); 899 old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
981 goto err_unlock_ovs; 984 goto err_unlock_ovs;
982 } 985 }
983 /* Check that the flow exists. */ 986 /* Check that the flow exists. */
984 flow = ovs_flow_tbl_lookup(&dp->table, &key); 987 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
985 if (unlikely(!flow)) { 988 if (unlikely(!flow)) {
986 error = -ENOENT; 989 error = -ENOENT;
987 goto err_unlock_ovs; 990 goto err_unlock_ovs;
988 } 991 }
989 /* The unmasked key has to be the same for flow updates. */ 992
990 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
991 error = -EEXIST;
992 goto err_unlock_ovs;
993 }
994 /* Update actions, if present. */ 993 /* Update actions, if present. */
995 if (likely(acts)) { 994 if (likely(acts)) {
996 old_acts = ovsl_dereference(flow->sf_acts); 995 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1063 goto unlock; 1062 goto unlock;
1064 } 1063 }
1065 1064
1066 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1065 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1067 if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { 1066 if (!flow) {
1068 err = -ENOENT; 1067 err = -ENOENT;
1069 goto unlock; 1068 goto unlock;
1070 } 1069 }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1113 goto unlock; 1112 goto unlock;
1114 } 1113 }
1115 1114
1116 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1115 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1117 if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) { 1116 if (unlikely(!flow)) {
1118 err = -ENOENT; 1117 err = -ENOENT;
1119 goto unlock; 1118 goto unlock;
1120 } 1119 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
61 61
62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) 62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
63 63
64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) 64void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
65 struct sk_buff *skb)
65{ 66{
66 struct flow_stats *stats; 67 struct flow_stats *stats;
67 __be16 tcp_flags = flow->key.tp.flags;
68 int node = numa_node_id(); 68 int node = numa_node_id();
69 69
70 stats = rcu_dereference(flow->stats[node]); 70 stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
180 unsigned char ar_tip[4]; /* target IP address */ 180 unsigned char ar_tip[4]; /* target IP address */
181} __packed; 181} __packed;
182 182
183void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *); 183void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
184 struct sk_buff *);
184void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, 185void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
185 unsigned long *used, __be16 *tcp_flags); 186 unsigned long *used, __be16 *tcp_flags);
186void ovs_flow_stats_clear(struct sw_flow *); 187void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); 456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
457} 457}
458 458
459struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
460 struct sw_flow_match *match)
461{
462 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
463 struct sw_flow_mask *mask;
464 struct sw_flow *flow;
465
466 /* Always called under ovs-mutex. */
467 list_for_each_entry(mask, &tbl->mask_list, list) {
468 flow = masked_flow_lookup(ti, match->key, mask);
469 if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
470 return flow;
471 }
472 return NULL;
473}
474
459int ovs_flow_tbl_num_masks(const struct flow_table *table) 475int ovs_flow_tbl_num_masks(const struct flow_table *table)
460{ 476{
461 struct sw_flow_mask *mask; 477 struct sw_flow_mask *mask;
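
ovs_flow_tbl_lookup_exact() above tries every registered mask and only accepts a candidate whose unmasked key matches, so one flow cannot shadow another that happens to collide under a shared mask. This is what lets the datapath.c hunks distinguish "flow genuinely absent" (-ENOENT) from the old blanket -EEXIST. A userspace model with hypothetical helpers (masked_lookup, unmasked_key_equal):

    #include <stddef.h>

    struct mask { struct mask *next; };
    struct flow;
    struct match { const void *key; };

    struct flow *masked_lookup(const void *key, const struct mask *m);
    int unmasked_key_equal(const struct flow *f, const struct match *m);

    /* Try each mask in turn; only an unmasked-key match is exact. */
    static struct flow *lookup_exact(struct mask *masks,
                                     const struct match *match)
    {
            for (struct mask *m = masks; m; m = m->next) {
                    struct flow *f = masked_lookup(match->key, m);

                    if (f && unmasked_key_equal(f, match))
                            return f;
            }
            return NULL;
    }
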
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
76 u32 *n_mask_hit); 76 u32 *n_mask_hit);
77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
78 const struct sw_flow_key *); 78 const struct sw_flow_key *);
79 79struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
80 struct sw_flow_match *match);
80bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 81bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
81 struct sw_flow_match *match); 82 struct sw_flow_match *match);
82 83
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
110 return PACKET_RCVD; 110 return PACKET_RCVD;
111} 111}
112 112
113/* Called with rcu_read_lock and BH disabled. */
114static int gre_err(struct sk_buff *skb, u32 info,
115 const struct tnl_ptk_info *tpi)
116{
117 struct ovs_net *ovs_net;
118 struct vport *vport;
119
120 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
121 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
122
123 if (unlikely(!vport))
124 return PACKET_REJECT;
125 else
126 return PACKET_RCVD;
127}
128
113static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) 129static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
114{ 130{
115 struct net *net = ovs_dp_get_net(vport->dp); 131 struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
186 202
187static struct gre_cisco_protocol gre_protocol = { 203static struct gre_cisco_protocol gre_protocol = {
188 .handler = gre_rcv, 204 .handler = gre_rcv,
205 .err_handler = gre_err,
189 .priority = 1, 206 .priority = 1,
190}; 207};
191 208
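
gre_err() above plugs into the gre_cisco err_handler hook; as far as the demux contract goes, PACKET_RCVD claims the ICMP error for this protocol and PACKET_REJECT lets lower-priority handlers (or default handling) see it. A rough model of that dispatch, assuming handlers are tried in priority order:

    enum pkt_verdict { PACKET_RCVD, PACKET_REJECT };

    /* Offer the error to each handler in priority order; the first
     * one to claim it wins. */
    static enum pkt_verdict demux_err(enum pkt_verdict (*h[])(void *),
                                      int n, void *err)
    {
            for (int i = 0; i < n; i++)
                    if (h[i](err) == PACKET_RCVD)
                            return PACKET_RCVD;
            return PACKET_REJECT;
    }
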
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index dcb19592761e..12c7e01c2677 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
321 loff_t *ppos) 321 loff_t *ppos)
322{ 322{
323 struct net *net = current->nsproxy->net_ns; 323 struct net *net = current->nsproxy->net_ns;
324 char tmp[8];
325 struct ctl_table tbl; 324 struct ctl_table tbl;
326 int ret; 325 bool changed = false;
327 int changed = 0;
328 char *none = "none"; 326 char *none = "none";
327 char tmp[8];
328 int ret;
329 329
330 memset(&tbl, 0, sizeof(struct ctl_table)); 330 memset(&tbl, 0, sizeof(struct ctl_table));
331 331
332 if (write) { 332 if (write) {
333 tbl.data = tmp; 333 tbl.data = tmp;
334 tbl.maxlen = 8; 334 tbl.maxlen = sizeof(tmp);
335 } else { 335 } else {
336 tbl.data = net->sctp.sctp_hmac_alg ? : none; 336 tbl.data = net->sctp.sctp_hmac_alg ? : none;
337 tbl.maxlen = strlen(tbl.data); 337 tbl.maxlen = strlen(tbl.data);
338 } 338 }
339 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
340 339
341 if (write) { 340 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
341 if (write && ret == 0) {
342#ifdef CONFIG_CRYPTO_MD5 342#ifdef CONFIG_CRYPTO_MD5
343 if (!strncmp(tmp, "md5", 3)) { 343 if (!strncmp(tmp, "md5", 3)) {
344 net->sctp.sctp_hmac_alg = "md5"; 344 net->sctp.sctp_hmac_alg = "md5";
345 changed = 1; 345 changed = true;
346 } 346 }
347#endif 347#endif
348#ifdef CONFIG_CRYPTO_SHA1 348#ifdef CONFIG_CRYPTO_SHA1
349 if (!strncmp(tmp, "sha1", 4)) { 349 if (!strncmp(tmp, "sha1", 4)) {
350 net->sctp.sctp_hmac_alg = "sha1"; 350 net->sctp.sctp_hmac_alg = "sha1";
351 changed = 1; 351 changed = true;
352 } 352 }
353#endif 353#endif
354 if (!strncmp(tmp, "none", 4)) { 354 if (!strncmp(tmp, "none", 4)) {
355 net->sctp.sctp_hmac_alg = NULL; 355 net->sctp.sctp_hmac_alg = NULL;
356 changed = 1; 356 changed = true;
357 } 357 }
358
359 if (!changed) 358 if (!changed)
360 ret = -EINVAL; 359 ret = -EINVAL;
361 } 360 }
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
368 loff_t *ppos) 367 loff_t *ppos)
369{ 368{
370 struct net *net = current->nsproxy->net_ns; 369 struct net *net = current->nsproxy->net_ns;
371 int new_value;
372 struct ctl_table tbl;
373 unsigned int min = *(unsigned int *) ctl->extra1; 370 unsigned int min = *(unsigned int *) ctl->extra1;
374 unsigned int max = *(unsigned int *) ctl->extra2; 371 unsigned int max = *(unsigned int *) ctl->extra2;
375 int ret; 372 struct ctl_table tbl;
373 int ret, new_value;
376 374
377 memset(&tbl, 0, sizeof(struct ctl_table)); 375 memset(&tbl, 0, sizeof(struct ctl_table));
378 tbl.maxlen = sizeof(unsigned int); 376 tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
381 tbl.data = &new_value; 379 tbl.data = &new_value;
382 else 380 else
383 tbl.data = &net->sctp.rto_min; 381 tbl.data = &net->sctp.rto_min;
382
384 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 383 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
385 if (write) { 384 if (write && ret == 0) {
386 if (ret || new_value > max || new_value < min) 385 if (new_value > max || new_value < min)
387 return -EINVAL; 386 return -EINVAL;
387
388 net->sctp.rto_min = new_value; 388 net->sctp.rto_min = new_value;
389 } 389 }
390
390 return ret; 391 return ret;
391} 392}
392 393
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
395 loff_t *ppos) 396 loff_t *ppos)
396{ 397{
397 struct net *net = current->nsproxy->net_ns; 398 struct net *net = current->nsproxy->net_ns;
398 int new_value;
399 struct ctl_table tbl;
400 unsigned int min = *(unsigned int *) ctl->extra1; 399 unsigned int min = *(unsigned int *) ctl->extra1;
401 unsigned int max = *(unsigned int *) ctl->extra2; 400 unsigned int max = *(unsigned int *) ctl->extra2;
402 int ret; 401 struct ctl_table tbl;
402 int ret, new_value;
403 403
404 memset(&tbl, 0, sizeof(struct ctl_table)); 404 memset(&tbl, 0, sizeof(struct ctl_table));
405 tbl.maxlen = sizeof(unsigned int); 405 tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
408 tbl.data = &new_value; 408 tbl.data = &new_value;
409 else 409 else
410 tbl.data = &net->sctp.rto_max; 410 tbl.data = &net->sctp.rto_max;
411
411 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 412 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
412 if (write) { 413 if (write && ret == 0) {
413 if (ret || new_value > max || new_value < min) 414 if (new_value > max || new_value < min)
414 return -EINVAL; 415 return -EINVAL;
416
415 net->sctp.rto_max = new_value; 417 net->sctp.rto_max = new_value;
416 } 418 }
419
417 return ret; 420 return ret;
418} 421}
419 422
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
444 tbl.data = &net->sctp.auth_enable; 447 tbl.data = &net->sctp.auth_enable;
445 448
446 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 449 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
447 450 if (write && ret == 0) {
448 if (write) {
449 struct sock *sk = net->sctp.ctl_sock; 451 struct sock *sk = net->sctp.ctl_sock;
450 452
451 net->sctp.auth_enable = new_value; 453 net->sctp.auth_enable = new_value;
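
All four SCTP handlers above converge on one shape: parse into a scratch variable, and commit to net->sctp only when the caller actually wrote and parsing succeeded (write && ret == 0), with range checks before the store. A standalone model where parse() stands in for proc_dointvec():

    #include <errno.h>

    /* Parse first, validate, and only then commit the live setting. */
    static int handle_uint(unsigned int *target, unsigned int min,
                           unsigned int max, int write, const char *buf,
                           int (*parse)(const char *, unsigned int *))
    {
            unsigned int new_value;
            int ret;

            if (!write)
                    return 0;       /* reads never reach the commit path */

            ret = parse(buf, &new_value);
            if (ret == 0) {
                    if (new_value < min || new_value > max)
                            return -EINVAL;
                    *target = new_value;    /* commit only on success */
            }
            return ret;
    }
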
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
366 * specification [SCTP] and any extensions for a list of possible 366 * specification [SCTP] and any extensions for a list of possible
367 * error formats. 367 * error formats.
368 */ 368 */
369struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 369struct sctp_ulpevent *
370 const struct sctp_association *asoc, struct sctp_chunk *chunk, 370sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
371 __u16 flags, gfp_t gfp) 371 struct sctp_chunk *chunk, __u16 flags,
372 gfp_t gfp)
372{ 373{
373 struct sctp_ulpevent *event; 374 struct sctp_ulpevent *event;
374 struct sctp_remote_error *sre; 375 struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
387 /* Copy the skb to a new skb with room for us to prepend 388 /* Copy the skb to a new skb with room for us to prepend
388 * notification with. 389 * notification with.
389 */ 390 */
390 skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), 391 skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
391 0, gfp);
392 392
393 /* Pull off the rest of the cause TLV from the chunk. */ 393 /* Pull off the rest of the cause TLV from the chunk. */
394 skb_pull(chunk->skb, elen); 394 skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
399 event = sctp_skb2event(skb); 399 event = sctp_skb2event(skb);
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
401 401
402 sre = (struct sctp_remote_error *) 402 sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
403 skb_push(skb, sizeof(struct sctp_remote_error));
404 403
405 /* Trim the buffer to the right length. */ 404 /* Trim the buffer to the right length. */
406 skb_trim(skb, sizeof(struct sctp_remote_error) + elen); 405 skb_trim(skb, sizeof(*sre) + elen);
407 406
408 /* Socket Extensions for SCTP 407 /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
409 * 5.3.1.3 SCTP_REMOTE_ERROR 408 memset(sre, 0, sizeof(*sre));
410 *
411 * sre_type:
412 * It should be SCTP_REMOTE_ERROR.
413 */
414 sre->sre_type = SCTP_REMOTE_ERROR; 409 sre->sre_type = SCTP_REMOTE_ERROR;
415
416 /*
417 * Socket Extensions for SCTP
418 * 5.3.1.3 SCTP_REMOTE_ERROR
419 *
420 * sre_flags: 16 bits (unsigned integer)
421 * Currently unused.
422 */
423 sre->sre_flags = 0; 410 sre->sre_flags = 0;
424
425 /* Socket Extensions for SCTP
426 * 5.3.1.3 SCTP_REMOTE_ERROR
427 *
428 * sre_length: sizeof (__u32)
429 *
430 * This field is the total length of the notification data,
431 * including the notification header.
432 */
433 sre->sre_length = skb->len; 411 sre->sre_length = skb->len;
434
435 /* Socket Extensions for SCTP
436 * 5.3.1.3 SCTP_REMOTE_ERROR
437 *
438 * sre_error: 16 bits (unsigned integer)
439 * This value represents one of the Operational Error causes defined in
440 * the SCTP specification, in network byte order.
441 */
442 sre->sre_error = cause; 412 sre->sre_error = cause;
443
444 /* Socket Extensions for SCTP
445 * 5.3.1.3 SCTP_REMOTE_ERROR
446 *
447 * sre_assoc_id: sizeof (sctp_assoc_t)
448 *
449 * The association id field, holds the identifier for the association.
450 * All notifications for a given association have the same association
451 * identifier. For TCP style socket, this field is ignored.
452 */
453 sctp_ulpevent_set_owner(event, asoc); 413 sctp_ulpevent_set_owner(event, asoc);
454 sre->sre_assoc_id = sctp_assoc2id(asoc); 414 sre->sre_assoc_id = sctp_assoc2id(asoc);
455 415
456 return event; 416 return event;
457
458fail: 417fail:
459 return NULL; 418 return NULL;
460} 419}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
899 return notification->sn_header.sn_type; 858 return notification->sn_header.sn_type;
900} 859}
901 860
902/* Copy out the sndrcvinfo into a msghdr. */ 861/* RFC6458, Section 5.3.2. SCTP Header Information Structure
862 * (SCTP_SNDRCV, DEPRECATED)
863 */
903void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 864void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
904 struct msghdr *msghdr) 865 struct msghdr *msghdr)
905{ 866{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
908 if (sctp_ulpevent_is_notification(event)) 869 if (sctp_ulpevent_is_notification(event))
909 return; 870 return;
910 871
911 /* Sockets API Extensions for SCTP 872 memset(&sinfo, 0, sizeof(sinfo));
912 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
913 *
914 * sinfo_stream: 16 bits (unsigned integer)
915 *
916 * For recvmsg() the SCTP stack places the message's stream number in
917 * this value.
918 */
919 sinfo.sinfo_stream = event->stream; 873 sinfo.sinfo_stream = event->stream;
920 /* sinfo_ssn: 16 bits (unsigned integer)
921 *
922 * For recvmsg() this value contains the stream sequence number that
923 * the remote endpoint placed in the DATA chunk. For fragmented
924 * messages this is the same number for all deliveries of the message
925 * (if more than one recvmsg() is needed to read the message).
926 */
927 sinfo.sinfo_ssn = event->ssn; 874 sinfo.sinfo_ssn = event->ssn;
928 /* sinfo_ppid: 32 bits (unsigned integer)
929 *
930 * In recvmsg() this value is
931 * the same information that was passed by the upper layer in the peer
932 * application. Please note that byte order issues are NOT accounted
933 * for and this information is passed opaquely by the SCTP stack from
934 * one end to the other.
935 */
936 sinfo.sinfo_ppid = event->ppid; 875 sinfo.sinfo_ppid = event->ppid;
937 /* sinfo_flags: 16 bits (unsigned integer)
938 *
939 * This field may contain any of the following flags and is composed of
940 * a bitwise OR of these values.
941 *
942 * recvmsg() flags:
943 *
944 * SCTP_UNORDERED - This flag is present when the message was sent
945 * non-ordered.
946 */
947 sinfo.sinfo_flags = event->flags; 876 sinfo.sinfo_flags = event->flags;
948 /* sinfo_tsn: 32 bit (unsigned integer)
949 *
950 * For the receiving side, this field holds a TSN that was
951 * assigned to one of the SCTP Data Chunks.
952 */
953 sinfo.sinfo_tsn = event->tsn; 877 sinfo.sinfo_tsn = event->tsn;
954 /* sinfo_cumtsn: 32 bit (unsigned integer)
955 *
956 * This field will hold the current cumulative TSN as
957 * known by the underlying SCTP layer. Note this field is
958 * ignored when sending and only valid for a receive
959 * operation when sinfo_flags are set to SCTP_UNORDERED.
960 */
961 sinfo.sinfo_cumtsn = event->cumtsn; 878 sinfo.sinfo_cumtsn = event->cumtsn;
962 /* sinfo_assoc_id: sizeof (sctp_assoc_t)
963 *
964 * The association handle field, sinfo_assoc_id, holds the identifier
965 * for the association announced in the COMMUNICATION_UP notification.
966 * All notifications for a given association have the same identifier.
967 * Ignored for one-to-one style sockets.
968 */
969 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); 879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
970 880 /* Context value that is set via SCTP_CONTEXT socket option. */
971 /* context value that is set via SCTP_CONTEXT socket option. */
972 sinfo.sinfo_context = event->asoc->default_rcv_context; 881 sinfo.sinfo_context = event->asoc->default_rcv_context;
973
974 /* These fields are not used while receiving. */ 882 /* These fields are not used while receiving. */
975 sinfo.sinfo_timetolive = 0; 883 sinfo.sinfo_timetolive = 0;
976 884
977 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, 885 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
978 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); 886 sizeof(sinfo), &sinfo);
979} 887}
980 888
981/* Do accounting for bytes received and hold a reference to the association 889/* Do accounting for bytes received and hold a reference to the association
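
Both rewritten ulpevent helpers now memset() the structure before filling individual fields. These structures reach userspace (as skb payload and via put_cmsg()), so any padding byte or field left unset would otherwise leak stale kernel memory. A sketch of the zero-then-fill idiom with an illustrative struct:

    #include <string.h>

    struct info {
            unsigned short stream;
            unsigned short flags;
            unsigned int unused[3];     /* fields not set on receive */
    };

    /* Zero everything first so padding and unused fields never carry
     * stale kernel bytes out to userspace. */
    static void fill_info(struct info *si, unsigned short stream,
                          unsigned short flags)
    {
            memset(si, 0, sizeof(*si));
            si->stream = stream;
            si->flags = flags;
    }
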
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 247e973544bf..f77366717420 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -592,6 +592,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
592 put_group_info(acred.group_info); 592 put_group_info(acred.group_info);
593 return ret; 593 return ret;
594} 594}
595EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
595 596
596void 597void
597rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, 598rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
559 559
560 buf = node->bclink.deferred_head; 560 buf = node->bclink.deferred_head;
561 node->bclink.deferred_head = buf->next; 561 node->bclink.deferred_head = buf->next;
562 buf->next = NULL;
562 node->bclink.deferred_size--; 563 node->bclink.deferred_size--;
563 goto receive; 564 goto receive;
564 } 565 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
101} 101}
102 102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer 103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let first buffer become head buffer 104 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
105 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0 105 * out: set when successful non-complete reassembly, otherwise NULL
106 * Leaves headbuf pointer at NULL if failure 106 * @*buf: in: the buffer to append. Always defined
107 * out: head buf after successful complete reassembly, otherwise NULL
108 * Returns 1 when reassembly complete, otherwise 0
107 */ 109 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) 110int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{ 111{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
122 goto out_free; 124 goto out_free;
123 head = *headbuf = frag; 125 head = *headbuf = frag;
124 skb_frag_list_init(head); 126 skb_frag_list_init(head);
127 *buf = NULL;
125 return 0; 128 return 0;
126 } 129 }
127 if (!head) 130 if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
150out_free: 153out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n"); 154 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf); 155 kfree_skb(*buf);
156 kfree_skb(*headbuf);
157 *buf = *headbuf = NULL;
153 return 0; 158 return 0;
154} 159}
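
The tipc_buf_append() changes pin down the ownership contract spelled out in the new kernel-doc: while reassembly is in progress the function owns the fragments (*buf is cleared after a successful append), and on failure it now frees both buffers and NULLs both pointers so the caller can neither double-free nor touch freed memory. A model of that failure path using plain malloc/free:

    #include <stdlib.h>

    /* On any reassembly error, consume both buffers and clear the
     * caller's pointers, making the error path safe to reach twice. */
    static int append_fail(void **headbuf, void **buf)
    {
            free(*buf);
            free(*headbuf);
            *buf = *headbuf = NULL;
            return 0;       /* 0 = reassembly not complete */
    }
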
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
424 if (end >= start) 424 if (end >= start)
425 return jiffies_to_msecs(end - start); 425 return jiffies_to_msecs(end - start);
426 426
427 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); 427 return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
428} 428}
429 429
430void 430void
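
The elapsed_jiffies_msecs() fix matters because jiffies wraps at ULONG_MAX, not at MAX_JIFFY_OFFSET: after one wrap the elapsed tick count is end + (ULONG_MAX - start) + 1, which is exactly what unsigned subtraction yields modulo 2^BITS_PER_LONG. A quick check in plain C:

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long start = ULONG_MAX - 5;    /* just before the wrap */
            unsigned long end = 10;                 /* just after it */

            /* Distance to the wrap point, plus the ticks after it. */
            assert(end + (ULONG_MAX - start) + 1 == 16);
            /* Unsigned wraparound computes the same span directly. */
            assert(end - start == 16);
            return 0;
    }
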
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1497 } 1497 }
1498 CMD(start_p2p_device, START_P2P_DEVICE); 1498 CMD(start_p2p_device, START_P2P_DEVICE);
1499 CMD(set_mcast_rate, SET_MCAST_RATE); 1499 CMD(set_mcast_rate, SET_MCAST_RATE);
1500#ifdef CONFIG_NL80211_TESTMODE
1501 CMD(testmode_cmd, TESTMODE);
1502#endif
1500 if (state->split) { 1503 if (state->split) {
1501 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1504 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1502 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1505 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1503 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1506 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1504 CMD(channel_switch, CHANNEL_SWITCH); 1507 CMD(channel_switch, CHANNEL_SWITCH);
1508 CMD(set_qos_map, SET_QOS_MAP);
1505 } 1509 }
1506 CMD(set_qos_map, SET_QOS_MAP); 1510 /* add into the if now */
1507
1508#ifdef CONFIG_NL80211_TESTMODE
1509 CMD(testmode_cmd, TESTMODE);
1510#endif
1511
1512#undef CMD 1511#undef CMD
1513 1512
1514 if (rdev->ops->connect || rdev->ops->auth) { 1513 if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
935 if (!band_rule_found) 935 if (!band_rule_found)
936 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
937 937
938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
939 939
940 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
941 return rr; 941 return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
1019} 1019}
1020#endif 1020#endif
1021 1021
1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency 1022/*
1023 * chan->center_freq fits there. 1023 * Note that right now we assume the desired channel bandwidth
1024 * If there is no such reg_rule, disable the channel, otherwise set the 1024 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1025 * flags corresponding to the bandwidths allowed in the particular reg_rule 1025 * per channel, the primary and the extension channel).
1026 */ 1026 */
1027static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
1028 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1085 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1086 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1091 bw_flags |= IEEE80211_CHAN_NO_HT40; 1087 bw_flags = IEEE80211_CHAN_NO_HT40;
1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1088 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1089 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1518 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1519 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1524 1520
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1521 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1530 bw_flags |= IEEE80211_CHAN_NO_HT40; 1522 bw_flags = IEEE80211_CHAN_NO_HT40;
1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1523 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1524 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1525 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
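
With the 10 MHz and 20 MHz cases dropped in both hunks, 20 MHz becomes the assumed minimum channel width (matching the MHZ_TO_KHZ(20) change in freq_reg_info_regd() above), so bw_flags can start from a plain assignment at the 40 MHz check. A model of the remaining mapping; the flag names mirror the kernel's but the function itself is illustrative:

    #define MHZ_TO_KHZ(x)   ((x) * 1000)
    #define CHAN_NO_HT40    0x1
    #define CHAN_NO_80MHZ   0x2
    #define CHAN_NO_160MHZ  0x4

    /* Map a regulatory rule's maximum bandwidth to restriction
     * flags; widths below 40 MHz are no longer flagged because
     * 20 MHz is now the assumed minimum. */
    static unsigned int bw_to_flags(unsigned int max_bw_khz)
    {
            unsigned int flags = 0;

            if (max_bw_khz < MHZ_TO_KHZ(40))
                    flags |= CHAN_NO_HT40;
            if (max_bw_khz < MHZ_TO_KHZ(80))
                    flags |= CHAN_NO_80MHZ;
            if (max_bw_khz < MHZ_TO_KHZ(160))
                    flags |= CHAN_NO_160MHZ;
            return flags;
    }
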