path: root/net
Diffstat (limited to 'net')
-rw-r--r-- net/8021q/vlan_core.c | 4
-rw-r--r-- net/atm/mpc.h | 4
-rw-r--r-- net/atm/mpoa_caches.c | 4
-rw-r--r-- net/bluetooth/af_bluetooth.c | 12
-rw-r--r-- net/bluetooth/cmtp/core.c | 3
-rw-r--r-- net/bluetooth/hci_conn.c | 103
-rw-r--r-- net/bluetooth/hci_core.c | 576
-rw-r--r-- net/bluetooth/hci_request.c | 682
-rw-r--r-- net/bluetooth/hci_request.h | 25
-rw-r--r-- net/bluetooth/hci_sock.c | 200
-rw-r--r-- net/bluetooth/l2cap_core.c | 19
-rw-r--r-- net/bluetooth/mgmt.c | 616
-rw-r--r-- net/bridge/br_stp.c | 2
-rw-r--r-- net/bridge/br_stp_if.c | 2
-rw-r--r-- net/ceph/auth_x.c | 36
-rw-r--r-- net/ceph/ceph_common.c | 18
-rw-r--r-- net/ceph/crypto.h | 4
-rw-r--r-- net/ceph/messenger.c | 88
-rw-r--r-- net/ceph/osd_client.c | 34
-rw-r--r-- net/core/dev.c | 138
-rw-r--r-- net/core/neighbour.c | 2
-rw-r--r-- net/core/net-traces.c | 4
-rw-r--r-- net/core/rtnetlink.c | 274
-rw-r--r-- net/core/skbuff.c | 3
-rw-r--r-- net/dsa/dsa.c | 18
-rw-r--r-- net/ipv4/inet_connection_sock.c | 4
-rw-r--r-- net/ipv4/ipconfig.c | 62
-rw-r--r-- net/ipv4/ipmr.c | 597
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 6
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
-rw-r--r-- net/ipv4/netfilter/nf_nat_pptp.c | 2
-rw-r--r-- net/ipv4/netfilter/nf_nat_snmp_basic.c | 22
-rw-r--r-- net/ipv4/raw.c | 8
-rw-r--r-- net/ipv4/tcp.c | 21
-rw-r--r-- net/ipv4/tcp_diag.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 14
-rw-r--r-- net/ipv6/mcast.c | 2
-rw-r--r-- net/ipv6/route.c | 32
-rw-r--r-- net/ipv6/tcp_ipv6.c | 19
-rw-r--r-- net/mac802154/rx.c | 3
-rw-r--r-- net/netfilter/Kconfig | 6
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_gen.h | 17
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ip.c | 14
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 64
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_port.c | 18
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 14
-rw-r--r-- net/netfilter/ipset/ip_set_hash_gen.h | 26
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 16
-rw-r--r-- net/netfilter/nfnetlink_log.c | 2
-rw-r--r-- net/netfilter/nft_counter.c | 49
-rw-r--r-- net/netfilter/nft_dynset.c | 5
-rw-r--r-- net/packet/af_packet.c | 92
-rw-r--r-- net/sctp/auth.c | 4
-rw-r--r-- net/sunrpc/auth_gss/auth_gss.c | 13
-rw-r--r-- net/sunrpc/cache.c | 53
-rw-r--r-- net/sunrpc/svcsock.c | 40
-rw-r--r-- net/tipc/bcast.c | 126
-rw-r--r-- net/tipc/bcast.h | 1
-rw-r--r-- net/tipc/bearer.c | 140
-rw-r--r-- net/tipc/bearer.h | 8
-rw-r--r-- net/tipc/core.h | 5
-rw-r--r-- net/tipc/discover.c | 38
-rw-r--r-- net/tipc/link.c | 626
-rw-r--r-- net/tipc/link.h | 175
-rw-r--r-- net/tipc/name_distr.c | 68
-rw-r--r-- net/tipc/name_distr.h | 1
-rw-r--r-- net/tipc/name_table.c | 5
-rw-r--r-- net/tipc/netlink.c | 8
-rw-r--r-- net/tipc/netlink_compat.c | 8
-rw-r--r-- net/tipc/node.c | 884
-rw-r--r-- net/tipc/node.h | 127
-rw-r--r-- net/tipc/udp_media.c | 5
-rw-r--r-- net/unix/af_unix.c | 24
-rw-r--r-- net/vmw_vsock/vmci_transport.h | 2
-rw-r--r-- net/vmw_vsock/vmci_transport_notify.c | 2
-rw-r--r-- net/vmw_vsock/vmci_transport_notify.h | 5
-rw-r--r-- net/vmw_vsock/vmci_transport_notify_qstate.c | 2
78 files changed, 3409 insertions(+), 2956 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 496b27588493..e2ed69850489 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp)
 		skb->pkt_type = PACKET_HOST;
 	}
 
-	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
+	    !netif_is_macvlan_port(vlan_dev) &&
+	    !netif_is_bridge_port(vlan_dev)) {
 		unsigned int offset = skb->data - skb_mac_header(skb);
 
 		/*
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
index 0919a88bbc70..cfc7b745aa91 100644
--- a/net/atm/mpc.h
+++ b/net/atm/mpc.h
@@ -21,11 +21,11 @@ struct mpoa_client {
 	uint8_t our_ctrl_addr[ATM_ESA_LEN]; /* MPC's control ATM address */
 
 	rwlock_t ingress_lock;
-	struct in_cache_ops *in_ops; /* ingress cache operations */
+	const struct in_cache_ops *in_ops; /* ingress cache operations */
 	in_cache_entry *in_cache; /* the ingress cache of this MPC */
 
 	rwlock_t egress_lock;
-	struct eg_cache_ops *eg_ops; /* egress cache operations */
+	const struct eg_cache_ops *eg_ops; /* egress cache operations */
 	eg_cache_entry *eg_cache; /* the egress cache of this MPC */
 
 	uint8_t *mps_macs; /* array of MPS MAC addresses, >=1 */
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index d1b2d9a03144..9e60e74c807d 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -534,7 +534,7 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
 }
 
 
-static struct in_cache_ops ingress_ops = {
+static const struct in_cache_ops ingress_ops = {
 	in_cache_add_entry,      /* add_entry     */
 	in_cache_get,            /* get           */
 	in_cache_get_with_mask,  /* get_with_mask */
@@ -548,7 +548,7 @@ static struct in_cache_ops ingress_ops = {
 	in_destroy_cache         /* destroy_cache */
 };
 
-static struct eg_cache_ops egress_ops = {
+static const struct eg_cache_ops egress_ops = {
 	eg_cache_add_entry,       /* add_entry       */
 	eg_cache_get_by_cache_id, /* get_by_cache_id */
 	eg_cache_get_by_tag,      /* get_by_tag      */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a3bffd1ec2b4..a83c6a73f562 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -33,8 +33,6 @@
 
 #include "selftest.h"
 
-#define VERSION "2.21"
-
 /* Bluetooth sockets */
 #define BT_MAX_PROTO	8
 static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
@@ -671,7 +669,7 @@ static const struct file_operations bt_fops = {
 };
 
 int bt_procfs_init(struct net *net, const char *name,
-		   struct bt_sock_list* sk_list,
+		   struct bt_sock_list *sk_list,
 		   int (* seq_show)(struct seq_file *, void *))
 {
 	sk_list->custom_seq_show = seq_show;
@@ -687,7 +685,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
 }
 #else
 int bt_procfs_init(struct net *net, const char *name,
-		   struct bt_sock_list* sk_list,
+		   struct bt_sock_list *sk_list,
 		   int (* seq_show)(struct seq_file *, void *))
 {
 	return 0;
@@ -715,7 +713,7 @@ static int __init bt_init(void)
 
 	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
 
-	BT_INFO("Core ver %s", VERSION);
+	BT_INFO("Core ver %s", BT_SUBSYS_VERSION);
 
 	err = bt_selftest();
 	if (err < 0)
@@ -789,7 +787,7 @@ subsys_initcall(bt_init);
 module_exit(bt_exit);
 
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
+MODULE_VERSION(BT_SUBSYS_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 298ed37010e6..9e59b6654126 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -178,8 +178,7 @@ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *
 		cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
 		break;
 	default:
-		if (session->reassembly[id] != NULL)
-			kfree_skb(session->reassembly[id]);
+		kfree_skb(session->reassembly[id]);
 		session->reassembly[id] = NULL;
 		break;
 	}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 85b82f7adbd2..2d334e07fd77 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -178,6 +178,10 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
 	hci_dev_hold(conn->hdev);
 	hci_conn_get(conn);
 
+	/* Even though we hold a reference to the hdev, many other
+	 * things might get cleaned up meanwhile, including the hdev's
+	 * own workqueue, so we can't use that for scheduling.
+	 */
 	schedule_work(&conn->le_scan_cleanup);
 }
 
@@ -781,7 +785,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 role)
 {
 	struct hci_conn_params *params;
-	struct hci_conn *conn, *conn_unfinished;
+	struct hci_conn *conn;
 	struct smp_irk *irk;
 	struct hci_request req;
 	int err;
@@ -794,35 +798,22 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		return ERR_PTR(-EOPNOTSUPP);
 	}
 
-	/* Some devices send ATT messages as soon as the physical link is
-	 * established. To be able to handle these ATT messages, the user-
-	 * space first establishes the connection and then starts the pairing
-	 * process.
-	 *
-	 * So if a hci_conn object already exists for the following connection
-	 * attempt, we simply update pending_sec_level and auth_type fields
-	 * and return the object found.
-	 */
-	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
-	conn_unfinished = NULL;
-	if (conn) {
-		if (conn->state == BT_CONNECT &&
-		    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
-			BT_DBG("will continue unfinished conn %pMR", dst);
-			conn_unfinished = conn;
-		} else {
-			if (conn->pending_sec_level < sec_level)
-				conn->pending_sec_level = sec_level;
-			goto done;
-		}
-	}
-
 	/* Since the controller supports only one LE connection attempt at a
 	 * time, we return -EBUSY if there is any connection attempt running.
 	 */
 	if (hci_lookup_le_connect(hdev))
 		return ERR_PTR(-EBUSY);
 
+	/* If there's already a connection object but it's not in
+	 * scanning state it means it must already be established, in
+	 * which case we can't do anything else except report a failure
+	 * to connect.
+	 */
+	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
+	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+		return ERR_PTR(-EBUSY);
+	}
+
 	/* When given an identity address with existing identity
 	 * resolving key, the connection needs to be established
 	 * to a resolvable random address.
@@ -838,23 +829,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 		dst_type = ADDR_LE_DEV_RANDOM;
 	}
 
-	if (conn_unfinished) {
-		conn = conn_unfinished;
+	if (conn) {
 		bacpy(&conn->dst, dst);
 	} else {
 		conn = hci_conn_add(hdev, LE_LINK, dst, role);
+		if (!conn)
+			return ERR_PTR(-ENOMEM);
+		hci_conn_hold(conn);
+		conn->pending_sec_level = sec_level;
 	}
 
-	if (!conn)
-		return ERR_PTR(-ENOMEM);
-
 	conn->dst_type = dst_type;
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->conn_timeout = conn_timeout;
 
-	if (!conn_unfinished)
-		conn->pending_sec_level = sec_level;
-
 	hci_req_init(&req, hdev);
 
 	/* Disable advertising if we're active. For master role
@@ -918,37 +906,9 @@ create_conn:
 		return ERR_PTR(err);
 	}
 
-done:
-	/* If this is continuation of connect started by hci_connect_le_scan,
-	 * it already called hci_conn_hold and calling it again would mess the
-	 * counter.
-	 */
-	if (!conn_unfinished)
-		hci_conn_hold(conn);
-
 	return conn;
 }
 
-static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
-					 u16 opcode)
-{
-	struct hci_conn *conn;
-
-	if (!status)
-		return;
-
-	BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
-	       status);
-
-	hci_dev_lock(hdev);
-
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-	if (conn)
-		hci_le_conn_failed(conn, status);
-
-	hci_dev_unlock(hdev);
-}
-
 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
 {
 	struct hci_conn *conn;
@@ -964,10 +924,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
 }
 
 /* This function requires the caller holds hdev->lock */
-static int hci_explicit_conn_params_set(struct hci_request *req,
+static int hci_explicit_conn_params_set(struct hci_dev *hdev,
 					bdaddr_t *addr, u8 addr_type)
 {
-	struct hci_dev *hdev = req->hdev;
 	struct hci_conn_params *params;
 
 	if (is_connected(hdev, addr, addr_type))
@@ -995,7 +954,6 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
 	}
 
 	params->explicit_connect = true;
-	__hci_update_background_scan(req);
 
 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
 	       params->auto_connect);
@@ -1006,11 +964,9 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
 /* This function requires the caller holds hdev->lock */
 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
 				     u8 dst_type, u8 sec_level,
-				     u16 conn_timeout, u8 role)
+				     u16 conn_timeout)
 {
 	struct hci_conn *conn;
-	struct hci_request req;
-	int err;
 
 	/* Let's make sure that le is enabled.*/
 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
@@ -1038,29 +994,22 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
 
 	BT_DBG("requesting refresh of dst_addr");
 
-	conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
 	if (!conn)
 		return ERR_PTR(-ENOMEM);
 
-	hci_req_init(&req, hdev);
-
-	if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
 		return ERR_PTR(-EBUSY);
 
 	conn->state = BT_CONNECT;
 	set_bit(HCI_CONN_SCANNING, &conn->flags);
-
-	err = hci_req_run(&req, hci_connect_le_scan_complete);
-	if (err && err != -ENODATA) {
-		hci_conn_del(conn);
-		return ERR_PTR(err);
-	}
-
 	conn->dst_type = dst_type;
 	conn->sec_level = BT_SECURITY_LOW;
 	conn->pending_sec_level = sec_level;
 	conn->conn_timeout = conn_timeout;
 
+	hci_update_background_scan(hdev);
+
 done:
 	hci_conn_hold(conn);
 	return conn;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 62edbf1b114e..89af7e4fac02 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -56,15 +56,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
 /* HCI ID Numbering */
 static DEFINE_IDA(hci_index_ida);
 
-/* ----- HCI requests ----- */
-
-#define HCI_REQ_DONE	  0
-#define HCI_REQ_PEND	  1
-#define HCI_REQ_CANCELED  2
-
-#define hci_req_lock(d)		mutex_lock(&d->req_lock)
-#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
-
 /* ---- HCI debugfs entries ---- */
 
 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
@@ -73,7 +64,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
 	struct hci_dev *hdev = file->private_data;
 	char buf[3];
 
-	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
+	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
 	buf[1] = '\n';
 	buf[2] = '\0';
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -101,14 +92,14 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
 		return -EALREADY;
 
-	hci_req_lock(hdev);
+	hci_req_sync_lock(hdev);
 	if (enable)
 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
 				     HCI_CMD_TIMEOUT);
 	else
 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
 				     HCI_CMD_TIMEOUT);
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
@@ -133,7 +124,7 @@ static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
 	struct hci_dev *hdev = file->private_data;
 	char buf[3];
 
-	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
+	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
 	buf[1] = '\n';
 	buf[2] = '\0';
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -165,9 +156,9 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
 	    !test_bit(HCI_RUNNING, &hdev->flags))
 		goto done;
 
-	hci_req_lock(hdev);
+	hci_req_sync_lock(hdev);
 	err = hdev->set_diag(hdev, enable);
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 
 	if (err < 0)
 		return err;
@@ -198,197 +189,14 @@ static void hci_debugfs_create_basic(struct hci_dev *hdev)
 			    &vendor_diag_fops);
 }
 
-/* ---- HCI requests ---- */
-
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
-				  struct sk_buff *skb)
-{
-	BT_DBG("%s result 0x%2.2x", hdev->name, result);
-
-	if (hdev->req_status == HCI_REQ_PEND) {
-		hdev->req_result = result;
-		hdev->req_status = HCI_REQ_DONE;
-		if (skb)
-			hdev->req_skb = skb_get(skb);
-		wake_up_interruptible(&hdev->req_wait_q);
-	}
-}
-
-static void hci_req_cancel(struct hci_dev *hdev, int err)
-{
-	BT_DBG("%s err 0x%2.2x", hdev->name, err);
-
-	if (hdev->req_status == HCI_REQ_PEND) {
-		hdev->req_result = err;
-		hdev->req_status = HCI_REQ_CANCELED;
-		wake_up_interruptible(&hdev->req_wait_q);
-	}
-}
-
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
-				  const void *param, u8 event, u32 timeout)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	struct hci_request req;
-	struct sk_buff *skb;
-	int err = 0;
-
-	BT_DBG("%s", hdev->name);
-
-	hci_req_init(&req, hdev);
-
-	hci_req_add_ev(&req, opcode, plen, param, event);
-
-	hdev->req_status = HCI_REQ_PEND;
-
-	add_wait_queue(&hdev->req_wait_q, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	err = hci_req_run_skb(&req, hci_req_sync_complete);
-	if (err < 0) {
-		remove_wait_queue(&hdev->req_wait_q, &wait);
-		set_current_state(TASK_RUNNING);
-		return ERR_PTR(err);
-	}
-
-	schedule_timeout(timeout);
-
-	remove_wait_queue(&hdev->req_wait_q, &wait);
-
-	if (signal_pending(current))
-		return ERR_PTR(-EINTR);
-
-	switch (hdev->req_status) {
-	case HCI_REQ_DONE:
-		err = -bt_to_errno(hdev->req_result);
-		break;
-
-	case HCI_REQ_CANCELED:
-		err = -hdev->req_result;
-		break;
-
-	default:
-		err = -ETIMEDOUT;
-		break;
-	}
-
-	hdev->req_status = hdev->req_result = 0;
-	skb = hdev->req_skb;
-	hdev->req_skb = NULL;
-
-	BT_DBG("%s end: err %d", hdev->name, err);
-
-	if (err < 0) {
-		kfree_skb(skb);
-		return ERR_PTR(err);
-	}
-
-	if (!skb)
-		return ERR_PTR(-ENODATA);
-
-	return skb;
-}
-EXPORT_SYMBOL(__hci_cmd_sync_ev);
-
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
-			       const void *param, u32 timeout)
-{
-	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
-}
-EXPORT_SYMBOL(__hci_cmd_sync);
-
-/* Execute request and wait for completion. */
-static int __hci_req_sync(struct hci_dev *hdev,
-			  void (*func)(struct hci_request *req,
-				       unsigned long opt),
-			  unsigned long opt, __u32 timeout)
-{
-	struct hci_request req;
-	DECLARE_WAITQUEUE(wait, current);
-	int err = 0;
-
-	BT_DBG("%s start", hdev->name);
-
-	hci_req_init(&req, hdev);
-
-	hdev->req_status = HCI_REQ_PEND;
-
-	func(&req, opt);
-
-	add_wait_queue(&hdev->req_wait_q, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	err = hci_req_run_skb(&req, hci_req_sync_complete);
-	if (err < 0) {
-		hdev->req_status = 0;
-
-		remove_wait_queue(&hdev->req_wait_q, &wait);
-		set_current_state(TASK_RUNNING);
-
-		/* ENODATA means the HCI request command queue is empty.
-		 * This can happen when a request with conditionals doesn't
-		 * trigger any commands to be sent. This is normal behavior
-		 * and should not trigger an error return.
-		 */
-		if (err == -ENODATA)
-			return 0;
-
-		return err;
-	}
-
-	schedule_timeout(timeout);
-
-	remove_wait_queue(&hdev->req_wait_q, &wait);
-
-	if (signal_pending(current))
-		return -EINTR;
-
-	switch (hdev->req_status) {
-	case HCI_REQ_DONE:
-		err = -bt_to_errno(hdev->req_result);
-		break;
-
-	case HCI_REQ_CANCELED:
-		err = -hdev->req_result;
-		break;
-
-	default:
-		err = -ETIMEDOUT;
-		break;
-	}
-
-	hdev->req_status = hdev->req_result = 0;
-
-	BT_DBG("%s end: err %d", hdev->name, err);
-
-	return err;
-}
-
-static int hci_req_sync(struct hci_dev *hdev,
-			void (*req)(struct hci_request *req,
-				    unsigned long opt),
-			unsigned long opt, __u32 timeout)
-{
-	int ret;
-
-	if (!test_bit(HCI_UP, &hdev->flags))
-		return -ENETDOWN;
-
-	/* Serialize all requests */
-	hci_req_lock(hdev);
-	ret = __hci_req_sync(hdev, req, opt, timeout);
-	hci_req_unlock(hdev);
-
-	return ret;
-}
-
-static void hci_reset_req(struct hci_request *req, unsigned long opt)
+static int hci_reset_req(struct hci_request *req, unsigned long opt)
 {
 	BT_DBG("%s %ld", req->hdev->name, opt);
 
 	/* Reset device */
 	set_bit(HCI_RESET, &req->hdev->flags);
 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
+	return 0;
 }
 
 static void bredr_init(struct hci_request *req)
@@ -428,7 +236,7 @@ static void amp_init1(struct hci_request *req)
 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
 }
 
-static void amp_init2(struct hci_request *req)
+static int amp_init2(struct hci_request *req)
 {
 	/* Read Local Supported Features. Not all AMP controllers
 	 * support this so it's placed conditionally in the second
@@ -436,9 +244,11 @@ static void amp_init2(struct hci_request *req)
 	 */
 	if (req->hdev->commands[14] & 0x20)
 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
+	return 0;
 }
 
-static void hci_init1_req(struct hci_request *req, unsigned long opt)
+static int hci_init1_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 
@@ -461,6 +271,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
 		BT_ERR("Unknown device type %d", hdev->dev_type);
 		break;
 	}
+
+	return 0;
 }
 
 static void bredr_setup(struct hci_request *req)
@@ -531,20 +343,30 @@ static void hci_setup_event_mask(struct hci_request *req)
 
 	if (lmp_bredr_capable(hdev)) {
 		events[4] |= 0x01; /* Flow Specification Complete */
-		events[4] |= 0x02; /* Inquiry Result with RSSI */
-		events[4] |= 0x04; /* Read Remote Extended Features Complete */
-		events[5] |= 0x08; /* Synchronous Connection Complete */
-		events[5] |= 0x10; /* Synchronous Connection Changed */
 	} else {
 		/* Use a different default for LE-only devices */
 		memset(events, 0, sizeof(events));
-		events[0] |= 0x10; /* Disconnection Complete */
-		events[1] |= 0x08; /* Read Remote Version Information Complete */
 		events[1] |= 0x20; /* Command Complete */
 		events[1] |= 0x40; /* Command Status */
 		events[1] |= 0x80; /* Hardware Error */
-		events[2] |= 0x04; /* Number of Completed Packets */
-		events[3] |= 0x02; /* Data Buffer Overflow */
+
+		/* If the controller supports the Disconnect command, enable
+		 * the corresponding event. In addition enable packet flow
+		 * control related events.
+		 */
+		if (hdev->commands[0] & 0x20) {
+			events[0] |= 0x10; /* Disconnection Complete */
+			events[2] |= 0x04; /* Number of Completed Packets */
+			events[3] |= 0x02; /* Data Buffer Overflow */
+		}
+
+		/* If the controller supports the Read Remote Version
+		 * Information command, enable the corresponding event.
+		 */
+		if (hdev->commands[2] & 0x80)
+			events[1] |= 0x08; /* Read Remote Version Information
+					    * Complete
+					    */
 
 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
 			events[0] |= 0x80; /* Encryption Change */
@@ -552,9 +374,18 @@ static void hci_setup_event_mask(struct hci_request *req)
 		}
 	}
 
-	if (lmp_inq_rssi_capable(hdev))
+	if (lmp_inq_rssi_capable(hdev) ||
+	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
 		events[4] |= 0x02; /* Inquiry Result with RSSI */
 
+	if (lmp_ext_feat_capable(hdev))
+		events[4] |= 0x04; /* Read Remote Extended Features Complete */
+
+	if (lmp_esco_capable(hdev)) {
+		events[5] |= 0x08; /* Synchronous Connection Complete */
+		events[5] |= 0x10; /* Synchronous Connection Changed */
+	}
+
 	if (lmp_sniffsubr_capable(hdev))
 		events[5] |= 0x20; /* Sniff Subrating */
 
@@ -590,7 +421,7 @@ static void hci_setup_event_mask(struct hci_request *req)
 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
 }
 
-static void hci_init2_req(struct hci_request *req, unsigned long opt)
+static int hci_init2_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 
@@ -670,6 +501,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
 			    &enable);
 	}
+
+	return 0;
 }
 
 static void hci_setup_link_policy(struct hci_request *req)
@@ -744,7 +577,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
 	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
 }
 
-static void hci_init3_req(struct hci_request *req, unsigned long opt)
+static int hci_init3_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 	u8 p;
@@ -777,7 +610,6 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 		u8 events[8];
 
 		memset(events, 0, sizeof(events));
-		events[0] = 0x0f;
 
 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
 			events[0] |= 0x10; /* LE Long Term Key Request */
@@ -804,6 +636,34 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 		 * Report
 		 */
 
+		/* If the controller supports the LE Set Scan Enable command,
+		 * enable the corresponding advertising report event.
+		 */
+		if (hdev->commands[26] & 0x08)
+			events[0] |= 0x02; /* LE Advertising Report */
+
+		/* If the controller supports the LE Create Connection
+		 * command, enable the corresponding event.
+		 */
+		if (hdev->commands[26] & 0x10)
+			events[0] |= 0x01; /* LE Connection Complete */
+
+		/* If the controller supports the LE Connection Update
+		 * command, enable the corresponding event.
+		 */
+		if (hdev->commands[27] & 0x04)
+			events[0] |= 0x04; /* LE Connection Update
+					    * Complete
+					    */
+
+		/* If the controller supports the LE Read Remote Used Features
+		 * command, enable the corresponding event.
+		 */
+		if (hdev->commands[27] & 0x20)
+			events[0] |= 0x08; /* LE Read Remote Used
+					    * Features Complete
+					    */
+
 		/* If the controller supports the LE Read Local P-256
 		 * Public Key command, enable the corresponding event.
 		 */
@@ -856,9 +716,11 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
 			    sizeof(cp), &cp);
 	}
+
+	return 0;
 }
 
-static void hci_init4_req(struct hci_request *req, unsigned long opt)
+static int hci_init4_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 
@@ -909,20 +771,22 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
 			    sizeof(support), &support);
 	}
+
+	return 0;
 }
 
 static int __hci_init(struct hci_dev *hdev)
 {
 	int err;
 
-	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
 	if (err < 0)
 		return err;
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP))
 		hci_debugfs_create_basic(hdev);
 
-	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
 	if (err < 0)
 		return err;
 
@@ -933,11 +797,11 @@ static int __hci_init(struct hci_dev *hdev)
 	if (hdev->dev_type != HCI_BREDR)
 		return 0;
 
-	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
 	if (err < 0)
 		return err;
 
-	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
 	if (err < 0)
 		return err;
 
@@ -968,7 +832,7 @@ static int __hci_init(struct hci_dev *hdev)
 	return 0;
 }
 
-static void hci_init0_req(struct hci_request *req, unsigned long opt)
+static int hci_init0_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_dev *hdev = req->hdev;
 
@@ -984,6 +848,8 @@ static void hci_init0_req(struct hci_request *req, unsigned long opt)
 	/* Read BD Address */
 	if (hdev->set_bdaddr)
 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
+
+	return 0;
 }
 
 static int __hci_unconf_init(struct hci_dev *hdev)
@@ -993,7 +859,7 @@ static int __hci_unconf_init(struct hci_dev *hdev)
 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 		return 0;
 
-	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
+	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
 	if (err < 0)
 		return err;
 
@@ -1003,7 +869,7 @@ static int __hci_unconf_init(struct hci_dev *hdev)
 	return 0;
 }
 
-static void hci_scan_req(struct hci_request *req, unsigned long opt)
+static int hci_scan_req(struct hci_request *req, unsigned long opt)
 {
 	__u8 scan = opt;
 
@@ -1011,9 +877,10 @@ static void hci_scan_req(struct hci_request *req, unsigned long opt)
 
 	/* Inquiry and Page scans */
 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+	return 0;
 }
 
-static void hci_auth_req(struct hci_request *req, unsigned long opt)
+static int hci_auth_req(struct hci_request *req, unsigned long opt)
 {
 	__u8 auth = opt;
 
@@ -1021,9 +888,10 @@ static void hci_auth_req(struct hci_request *req, unsigned long opt)
 
 	/* Authentication */
 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+	return 0;
 }
 
-static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
+static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
 {
 	__u8 encrypt = opt;
 
@@ -1031,9 +899,10 @@ static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
 
 	/* Encryption */
 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+	return 0;
 }
 
-static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
+static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
 {
 	__le16 policy = cpu_to_le16(opt);
 
@@ -1041,6 +910,7 @@ static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
 
 	/* Default link policy */
 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+	return 0;
 }
 
 /* Get HCI device by index.
@@ -1285,7 +1155,7 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
 	return copied;
 }
 
-static void hci_inq_req(struct hci_request *req, unsigned long opt)
+static int hci_inq_req(struct hci_request *req, unsigned long opt)
 {
 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
 	struct hci_dev *hdev = req->hdev;
@@ -1294,13 +1164,15 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
 	BT_DBG("%s", hdev->name);
 
 	if (test_bit(HCI_INQUIRY, &hdev->flags))
-		return;
+		return 0;
 
 	/* Start Inquiry */
 	memcpy(&cp.lap, &ir->lap, 3);
 	cp.length = ir->length;
 	cp.num_rsp = ir->num_rsp;
 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+	return 0;
 }
 
 int hci_inquiry(void __user *arg)
@@ -1351,7 +1223,7 @@ int hci_inquiry(void __user *arg)
 
 	if (do_inquiry) {
 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
-				   timeo);
+				   timeo, NULL);
 		if (err < 0)
 			goto done;
 
@@ -1404,7 +1276,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 
 	BT_DBG("%s %p", hdev->name, hdev);
 
-	hci_req_lock(hdev);
+	hci_req_sync_lock(hdev);
 
 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
 		ret = -ENODEV;
@@ -1557,7 +1429,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 	}
 
 done:
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 	return ret;
 }
 
@@ -1651,12 +1523,12 @@ int hci_dev_do_close(struct hci_dev *hdev)
 
 	cancel_delayed_work(&hdev->power_off);
 
-	hci_req_cancel(hdev, ENODEV);
-	hci_req_lock(hdev);
+	hci_request_cancel_all(hdev);
+	hci_req_sync_lock(hdev);
 
 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
 		cancel_delayed_work_sync(&hdev->cmd_timer);
-		hci_req_unlock(hdev);
+		hci_req_sync_unlock(hdev);
 		return 0;
 	}
 
@@ -1674,9 +1546,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
 		cancel_delayed_work(&hdev->service_cache);
 
-	cancel_delayed_work_sync(&hdev->le_scan_disable);
-	cancel_delayed_work_sync(&hdev->le_scan_restart);
-
 	if (hci_dev_test_flag(hdev, HCI_MGMT))
 		cancel_delayed_work_sync(&hdev->rpa_expired);
 
@@ -1717,7 +1586,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
 		set_bit(HCI_INIT, &hdev->flags);
-		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
+		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
@@ -1754,7 +1623,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
 	bacpy(&hdev->random_addr, BDADDR_ANY);
 
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 
 	hci_dev_put(hdev);
 	return 0;
@@ -1790,7 +1659,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
 
 	BT_DBG("%s %p", hdev->name, hdev);
 
-	hci_req_lock(hdev);
+	hci_req_sync_lock(hdev);
 
 	/* Drop queues */
 	skb_queue_purge(&hdev->rx_q);
@@ -1812,9 +1681,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
 	atomic_set(&hdev->cmd_cnt, 1);
 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
 
-	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
 
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 	return ret;
 }
 
@@ -1947,7 +1816,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 	switch (cmd) {
 	case HCISETAUTH:
 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
-				   HCI_INIT_TIMEOUT);
+				   HCI_INIT_TIMEOUT, NULL);
 		break;
 
 	case HCISETENCRYPT:
@@ -1959,18 +1828,18 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
 			/* Auth must be enabled first */
 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
-					   HCI_INIT_TIMEOUT);
+					   HCI_INIT_TIMEOUT, NULL);
 			if (err)
 				break;
 		}
 
 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
-				   HCI_INIT_TIMEOUT);
+				   HCI_INIT_TIMEOUT, NULL);
 		break;
 
 	case HCISETSCAN:
 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
-				   HCI_INIT_TIMEOUT);
+				   HCI_INIT_TIMEOUT, NULL);
 
 		/* Ensure that the connectable and discoverable states
 		 * get correctly modified as this was a non-mgmt change.
@@ -1981,7 +1850,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 	case HCISETLINKPOL:
 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
-				   HCI_INIT_TIMEOUT);
+				   HCI_INIT_TIMEOUT, NULL);
 		break;
 
 	case HCISETLINKMODE:
@@ -2731,7 +2600,8 @@ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
 }
 
 /* This function requires the caller holds hdev->lock */
-struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
+struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
+{
 	struct adv_info *cur_instance;
 
 	cur_instance = hci_find_adv_instance(hdev, instance);
@@ -3024,181 +2894,16 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev)
 }
 
 /* This function requires the caller holds hdev->lock */
-void hci_conn_params_clear_all(struct hci_dev *hdev)
+static void hci_conn_params_clear_all(struct hci_dev *hdev)
 {
 	struct hci_conn_params *params, *tmp;
 
 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
 		hci_conn_params_free(params);
 
-	hci_update_background_scan(hdev);
-
 	BT_DBG("All LE connection parameters were removed");
 }
 
-static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
-	if (status) {
-		BT_ERR("Failed to start inquiry: status %d", status);
-
-		hci_dev_lock(hdev);
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-		hci_dev_unlock(hdev);
-		return;
-	}
-}
-
-static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
-					  u16 opcode)
-{
-	/* General inquiry access code (GIAC) */
-	u8 lap[3] = { 0x33, 0x8b, 0x9e };
-	struct hci_cp_inquiry cp;
-	int err;
-
-	if (status) {
-		BT_ERR("Failed to disable LE scanning: status %d", status);
-		return;
-	}
-
-	hdev->discovery.scan_start = 0;
-
-	switch (hdev->discovery.type) {
-	case DISCOV_TYPE_LE:
-		hci_dev_lock(hdev);
-		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-		hci_dev_unlock(hdev);
-		break;
-
-	case DISCOV_TYPE_INTERLEAVED:
-		hci_dev_lock(hdev);
-
-		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
-			     &hdev->quirks)) {
-			/* If we were running LE only scan, change discovery
-			 * state. If we were running both LE and BR/EDR inquiry
-			 * simultaneously, and BR/EDR inquiry is already
-			 * finished, stop discovery, otherwise BR/EDR inquiry
-			 * will stop discovery when finished. If we will resolve
-			 * remote device name, do not change discovery state.
-			 */
-			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
-			    hdev->discovery.state != DISCOVERY_RESOLVING)
-				hci_discovery_set_state(hdev,
-							DISCOVERY_STOPPED);
-		} else {
-			struct hci_request req;
-
-			hci_inquiry_cache_flush(hdev);
-
-			hci_req_init(&req, hdev);
-
-			memset(&cp, 0, sizeof(cp));
-			memcpy(&cp.lap, lap, sizeof(cp.lap));
-			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
-			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
-
-			err = hci_req_run(&req, inquiry_complete);
-			if (err) {
-				BT_ERR("Inquiry request failed: err %d", err);
-				hci_discovery_set_state(hdev,
-							DISCOVERY_STOPPED);
-			}
-		}
-
-		hci_dev_unlock(hdev);
-		break;
-	}
-}
-
-static void le_scan_disable_work(struct work_struct *work)
-{
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-					    le_scan_disable.work);
-	struct hci_request req;
-	int err;
-
-	BT_DBG("%s", hdev->name);
-
-	cancel_delayed_work_sync(&hdev->le_scan_restart);
-
-	hci_req_init(&req, hdev);
-
-	hci_req_add_le_scan_disable(&req);
-
-	err = hci_req_run(&req, le_scan_disable_work_complete);
-	if (err)
-		BT_ERR("Disable LE scanning request failed: err %d", err);
-}
-
-static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
-					  u16 opcode)
-{
-	unsigned long timeout, duration, scan_start, now;
-
-	BT_DBG("%s", hdev->name);
-
-	if (status) {
-		BT_ERR("Failed to restart LE scan: status %d", status);
-		return;
-	}
-
-	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
-	    !hdev->discovery.scan_start)
-		return;
-
-	/* When the scan was started, hdev->le_scan_disable has been queued
-	 * after duration from scan_start. During scan restart this job
-	 * has been canceled, and we need to queue it again after proper
-	 * timeout, to make sure that scan does not run indefinitely.
-	 */
-	duration = hdev->discovery.scan_duration;
-	scan_start = hdev->discovery.scan_start;
-	now = jiffies;
-	if (now - scan_start <= duration) {
-		int elapsed;
-
-		if (now >= scan_start)
-			elapsed = now - scan_start;
-		else
-			elapsed = ULONG_MAX - scan_start + now;
-
-		timeout = duration - elapsed;
-	} else {
-		timeout = 0;
-	}
-	queue_delayed_work(hdev->workqueue,
-			   &hdev->le_scan_disable, timeout);
-}
-
-static void le_scan_restart_work(struct work_struct *work)
-{
-	struct hci_dev *hdev = container_of(work, struct hci_dev,
-					    le_scan_restart.work);
-	struct hci_request req;
-	struct hci_cp_le_set_scan_enable cp;
-	int err;
-
-	BT_DBG("%s", hdev->name);
-
-	/* If controller is not scanning we are done. */
-	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
-		return;
-
-	hci_req_init(&req, hdev);
-
-	hci_req_add_le_scan_disable(&req);
-
-	memset(&cp, 0, sizeof(cp));
-	cp.enable = LE_SCAN_ENABLE;
-	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-
-	err = hci_req_run(&req, le_scan_restart_work_complete);
-	if (err)
-		BT_ERR("Restart LE scan request failed: err %d", err);
-}
-
 /* Copy the Identity Address of the controller.
  *
  * If the controller has a public BD_ADDR, then by default use that one.
@@ -3298,8 +3003,6 @@ struct hci_dev *hci_alloc_dev(void)
 
 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
-	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
-	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
 
 	skb_queue_head_init(&hdev->rx_q);
@@ -3310,6 +3013,8 @@ struct hci_dev *hci_alloc_dev(void)
 
 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
 
+	hci_request_setup(hdev);
+
 	hci_init_sysfs(hdev);
 	discovery_init(hdev);
 
@@ -3520,7 +3225,7 @@ int hci_reset_dev(struct hci_dev *hdev)
 	if (!skb)
 		return -ENOMEM;
 
-	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 	memcpy(skb_put(skb, 3), hw_err, 3);
 
 	/* Send Hardware Error to upper stack */
@@ -3537,9 +3242,9 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
 		return -ENXIO;
 	}
 
-	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
-	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
-	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -3561,7 +3266,7 @@ EXPORT_SYMBOL(hci_recv_frame);
 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	/* Mark as diagnostic packet */
-	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
+	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
 
 	/* Time stamp */
 	__net_timestamp(skb);
@@ -3603,7 +3308,8 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	int err;
 
-	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
+	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
+	       skb->len);
 
 	/* Time stamp */
 	__net_timestamp(skb);
@@ -3648,7 +3354,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
 	/* Stand-alone HCI commands must be flagged as
 	 * single-command requests.
 	 */
-	bt_cb(skb)->hci.req_start = true;
+	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
 
 	skb_queue_tail(&hdev->cmd_q, skb);
 	queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -3685,9 +3391,9 @@ struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 
 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
 
-	hci_req_lock(hdev);
+	hci_req_sync_lock(hdev);
 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
-	hci_req_unlock(hdev);
+	hci_req_sync_unlock(hdev);
 
 	return skb;
 }
@@ -3716,7 +3422,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
 	skb->len = skb_headlen(skb);
 	skb->data_len = 0;
 
-	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
 
 	switch (hdev->dev_type) {
 	case HCI_BREDR:
@@ -3756,7 +3462,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3756 do { 3462 do {
3757 skb = list; list = list->next; 3463 skb = list; list = list->next;
3758 3464
3759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 3465 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3760 hci_add_acl_hdr(skb, conn->handle, flags); 3466 hci_add_acl_hdr(skb, conn->handle, flags);
3761 3467
3762 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3468 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -3794,7 +3500,7 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3794 skb_reset_transport_header(skb); 3500 skb_reset_transport_header(skb);
3795 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 3501 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3796 3502
3797 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; 3503 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3798 3504
3799 skb_queue_tail(&conn->data_q, skb); 3505 skb_queue_tail(&conn->data_q, skb);
3800 queue_work(hdev->workqueue, &hdev->tx_work); 3506 queue_work(hdev->workqueue, &hdev->tx_work);
@@ -4345,7 +4051,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
4345 if (!skb) 4051 if (!skb)
4346 return true; 4052 return true;
4347 4053
4348 return bt_cb(skb)->hci.req_start; 4054 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4349} 4055}
4350 4056
4351static void hci_resend_last(struct hci_dev *hdev) 4057static void hci_resend_last(struct hci_dev *hdev)
@@ -4405,20 +4111,20 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4405 * callback would be found in hdev->sent_cmd instead of the 4111 * callback would be found in hdev->sent_cmd instead of the
4406 * command queue (hdev->cmd_q). 4112 * command queue (hdev->cmd_q).
4407 */ 4113 */
4408 if (bt_cb(hdev->sent_cmd)->hci.req_complete) { 4114 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4409 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; 4115 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4410 return; 4116 return;
4411 } 4117 }
4412 4118
4413 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) { 4119 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4414 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; 4120 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4415 return; 4121 return;
4416 } 4122 }
4417 4123
4418 /* Remove all pending commands belonging to this request */ 4124 /* Remove all pending commands belonging to this request */
4419 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 4125 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4420 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 4126 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4421 if (bt_cb(skb)->hci.req_start) { 4127 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4422 __skb_queue_head(&hdev->cmd_q, skb); 4128 __skb_queue_head(&hdev->cmd_q, skb);
4423 break; 4129 break;
4424 } 4130 }
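
The req_start/req_complete churn above reflects a control-block rework: the two completion callbacks now apparently share a union, so a flags byte has to record which member is valid (testing both pointers, as the old code did, no longer works), and the old req_start bool becomes a bit in the same byte. A sketch of the assumed struct hci_ctrl layout in include/net/bluetooth/bluetooth.h:

	#define HCI_REQ_START	BIT(0)
	#define HCI_REQ_SKB	BIT(1)

	struct hci_ctrl {
		__u16 opcode;
		u8 req_flags;		/* HCI_REQ_START | HCI_REQ_SKB */
		u8 req_event;
		union {
			/* HCI_REQ_SKB set: req_complete_skb is valid */
			hci_req_complete_t req_complete;
			hci_req_complete_skb_t req_complete_skb;
		};
	};
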
@@ -4453,7 +4159,7 @@ static void hci_rx_work(struct work_struct *work)
4453 4159
4454 if (test_bit(HCI_INIT, &hdev->flags)) { 4160 if (test_bit(HCI_INIT, &hdev->flags)) {
4455 /* Don't process data packets in this state. */ 4161
4456 switch (bt_cb(skb)->pkt_type) { 4162 switch (hci_skb_pkt_type(skb)) {
4457 case HCI_ACLDATA_PKT: 4163 case HCI_ACLDATA_PKT:
4458 case HCI_SCODATA_PKT: 4164 case HCI_SCODATA_PKT:
4459 kfree_skb(skb); 4165 kfree_skb(skb);
@@ -4462,7 +4168,7 @@ static void hci_rx_work(struct work_struct *work)
4462 } 4168 }
4463 4169
4464 /* Process frame */ 4170 /* Process frame */
4465 switch (bt_cb(skb)->pkt_type) { 4171 switch (hci_skb_pkt_type(skb)) {
4466 case HCI_EVENT_PKT: 4172 case HCI_EVENT_PKT:
4467 BT_DBG("%s Event packet", hdev->name); 4173 BT_DBG("%s Event packet", hdev->name);
4468 hci_event_packet(hdev, skb); 4174 hci_event_packet(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 981f8a202c27..e639671f54bd 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -27,6 +27,10 @@
27#include "smp.h" 27#include "smp.h"
28#include "hci_request.h" 28#include "hci_request.h"
29 29
30#define HCI_REQ_DONE 0
31#define HCI_REQ_PEND 1
32#define HCI_REQ_CANCELED 2
33
30void hci_req_init(struct hci_request *req, struct hci_dev *hdev) 34void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
31{ 35{
32 skb_queue_head_init(&req->cmd_q); 36 skb_queue_head_init(&req->cmd_q);
@@ -56,8 +60,12 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
56 return -ENODATA; 60 return -ENODATA;
57 61
58 skb = skb_peek_tail(&req->cmd_q); 62 skb = skb_peek_tail(&req->cmd_q);
59 bt_cb(skb)->hci.req_complete = complete; 63 if (complete) {
60 bt_cb(skb)->hci.req_complete_skb = complete_skb; 64 bt_cb(skb)->hci.req_complete = complete;
65 } else if (complete_skb) {
66 bt_cb(skb)->hci.req_complete_skb = complete_skb;
67 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
68 }
61 69
62 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 70 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
63 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); 71 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -78,6 +86,203 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
78 return req_run(req, NULL, complete); 86 return req_run(req, NULL, complete);
79} 87}
80 88
89static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
90 struct sk_buff *skb)
91{
92 BT_DBG("%s result 0x%2.2x", hdev->name, result);
93
94 if (hdev->req_status == HCI_REQ_PEND) {
95 hdev->req_result = result;
96 hdev->req_status = HCI_REQ_DONE;
97 if (skb)
98 hdev->req_skb = skb_get(skb);
99 wake_up_interruptible(&hdev->req_wait_q);
100 }
101}
102
103void hci_req_sync_cancel(struct hci_dev *hdev, int err)
104{
105 BT_DBG("%s err 0x%2.2x", hdev->name, err);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = err;
109 hdev->req_status = HCI_REQ_CANCELED;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
115 const void *param, u8 event, u32 timeout)
116{
117 DECLARE_WAITQUEUE(wait, current);
118 struct hci_request req;
119 struct sk_buff *skb;
120 int err = 0;
121
122 BT_DBG("%s", hdev->name);
123
124 hci_req_init(&req, hdev);
125
126 hci_req_add_ev(&req, opcode, plen, param, event);
127
128 hdev->req_status = HCI_REQ_PEND;
129
130 add_wait_queue(&hdev->req_wait_q, &wait);
131 set_current_state(TASK_INTERRUPTIBLE);
132
133 err = hci_req_run_skb(&req, hci_req_sync_complete);
134 if (err < 0) {
135 remove_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_RUNNING);
137 return ERR_PTR(err);
138 }
139
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return ERR_PTR(-EINTR);
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_to_errno(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
159 }
160
161 hdev->req_status = hdev->req_result = 0;
162 skb = hdev->req_skb;
163 hdev->req_skb = NULL;
164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 if (err < 0) {
168 kfree_skb(skb);
169 return ERR_PTR(err);
170 }
171
172 if (!skb)
173 return ERR_PTR(-ENODATA);
174
175 return skb;
176}
177EXPORT_SYMBOL(__hci_cmd_sync_ev);
178
179struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
180 const void *param, u32 timeout)
181{
182 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
183}
184EXPORT_SYMBOL(__hci_cmd_sync);
185
186/* Execute request and wait for completion. */
187int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
188 unsigned long opt),
189 unsigned long opt, u32 timeout, u8 *hci_status)
190{
191 struct hci_request req;
192 DECLARE_WAITQUEUE(wait, current);
193 int err = 0;
194
195 BT_DBG("%s start", hdev->name);
196
197 hci_req_init(&req, hdev);
198
199 hdev->req_status = HCI_REQ_PEND;
200
201 err = func(&req, opt);
202 if (err) {
203 if (hci_status)
204 *hci_status = HCI_ERROR_UNSPECIFIED;
205 return err;
206 }
207
208 add_wait_queue(&hdev->req_wait_q, &wait);
209 set_current_state(TASK_INTERRUPTIBLE);
210
211 err = hci_req_run_skb(&req, hci_req_sync_complete);
212 if (err < 0) {
213 hdev->req_status = 0;
214
215 remove_wait_queue(&hdev->req_wait_q, &wait);
216 set_current_state(TASK_RUNNING);
217
 218 /* ENODATA means the HCI request command queue is empty.
 219 * This can happen when a request with conditionals doesn't
 220 * queue any commands to be sent. This is normal behavior
 221 * and should not result in an error return.
 222 */
223 if (err == -ENODATA) {
224 if (hci_status)
225 *hci_status = 0;
226 return 0;
227 }
228
229 if (hci_status)
230 *hci_status = HCI_ERROR_UNSPECIFIED;
231
232 return err;
233 }
234
235 schedule_timeout(timeout);
236
237 remove_wait_queue(&hdev->req_wait_q, &wait);
238
239 if (signal_pending(current))
240 return -EINTR;
241
242 switch (hdev->req_status) {
243 case HCI_REQ_DONE:
244 err = -bt_to_errno(hdev->req_result);
245 if (hci_status)
246 *hci_status = hdev->req_result;
247 break;
248
249 case HCI_REQ_CANCELED:
250 err = -hdev->req_result;
251 if (hci_status)
252 *hci_status = HCI_ERROR_UNSPECIFIED;
253 break;
254
255 default:
256 err = -ETIMEDOUT;
257 if (hci_status)
258 *hci_status = HCI_ERROR_UNSPECIFIED;
259 break;
260 }
261
262 hdev->req_status = hdev->req_result = 0;
263
264 BT_DBG("%s end: err %d", hdev->name, err);
265
266 return err;
267}
268
269int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt),
271 unsigned long opt, u32 timeout, u8 *hci_status)
272{
273 int ret;
274
275 if (!test_bit(HCI_UP, &hdev->flags))
276 return -ENETDOWN;
277
278 /* Serialize all requests */
279 hci_req_sync_lock(hdev);
280 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
281 hci_req_sync_unlock(hdev);
282
283 return ret;
284}
285
81struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 286struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
82 const void *param) 287 const void *param)
83{ 288{
@@ -98,8 +303,8 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
98 303
99 BT_DBG("skb len %d", skb->len); 304 BT_DBG("skb len %d", skb->len);
100 305
101 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 306 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
102 bt_cb(skb)->hci.opcode = opcode; 307 hci_skb_opcode(skb) = opcode;
103 308
104 return skb; 309 return skb;
105} 310}
@@ -128,7 +333,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
128 } 333 }
129 334
130 if (skb_queue_empty(&req->cmd_q)) 335 if (skb_queue_empty(&req->cmd_q))
131 bt_cb(skb)->hci.req_start = true; 336 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
132 337
133 bt_cb(skb)->hci.req_event = event; 338 bt_cb(skb)->hci.req_event = event;
134 339
@@ -476,7 +681,7 @@ void hci_update_page_scan(struct hci_dev *hdev)
476 * 681 *
477 * This function requires the caller holds hdev->lock. 682 * This function requires the caller holds hdev->lock.
478 */ 683 */
479void __hci_update_background_scan(struct hci_request *req) 684static void __hci_update_background_scan(struct hci_request *req)
480{ 685{
481 struct hci_dev *hdev = req->hdev; 686 struct hci_dev *hdev = req->hdev;
482 687
@@ -543,28 +748,6 @@ void __hci_update_background_scan(struct hci_request *req)
543 } 748 }
544} 749}
545 750
546static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
547 u16 opcode)
548{
549 if (status)
550 BT_DBG("HCI request failed to update background scanning: "
551 "status 0x%2.2x", status);
552}
553
554void hci_update_background_scan(struct hci_dev *hdev)
555{
556 int err;
557 struct hci_request req;
558
559 hci_req_init(&req, hdev);
560
561 __hci_update_background_scan(&req);
562
563 err = hci_req_run(&req, update_background_scan_complete);
564 if (err && err != -ENODATA)
565 BT_ERR("Failed to run HCI request: err %d", err);
566}
567
568void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, 751void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
569 u8 reason) 752 u8 reason)
570{ 753{
@@ -657,3 +840,446 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
657 840
658 return 0; 841 return 0;
659} 842}
843
844static int update_bg_scan(struct hci_request *req, unsigned long opt)
845{
846 hci_dev_lock(req->hdev);
847 __hci_update_background_scan(req);
848 hci_dev_unlock(req->hdev);
849 return 0;
850}
851
852static void bg_scan_update(struct work_struct *work)
853{
854 struct hci_dev *hdev = container_of(work, struct hci_dev,
855 bg_scan_update);
856 struct hci_conn *conn;
857 u8 status;
858 int err;
859
860 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
861 if (!err)
862 return;
863
864 hci_dev_lock(hdev);
865
866 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
867 if (conn)
868 hci_le_conn_failed(conn, status);
869
870 hci_dev_unlock(hdev);
871}
872
873static int le_scan_disable(struct hci_request *req, unsigned long opt)
874{
875 hci_req_add_le_scan_disable(req);
876 return 0;
877}
878
879static int bredr_inquiry(struct hci_request *req, unsigned long opt)
880{
881 u8 length = opt;
882 /* General inquiry access code (GIAC) */
883 u8 lap[3] = { 0x33, 0x8b, 0x9e };
884 struct hci_cp_inquiry cp;
885
886 BT_DBG("%s", req->hdev->name);
887
888 hci_dev_lock(req->hdev);
889 hci_inquiry_cache_flush(req->hdev);
890 hci_dev_unlock(req->hdev);
891
892 memset(&cp, 0, sizeof(cp));
893 memcpy(&cp.lap, lap, sizeof(cp.lap));
894 cp.length = length;
895
896 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
897
898 return 0;
899}
900
901static void le_scan_disable_work(struct work_struct *work)
902{
903 struct hci_dev *hdev = container_of(work, struct hci_dev,
904 le_scan_disable.work);
905 u8 status;
906
907 BT_DBG("%s", hdev->name);
908
909 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
910 return;
911
912 cancel_delayed_work(&hdev->le_scan_restart);
913
914 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
915 if (status) {
916 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
917 return;
918 }
919
920 hdev->discovery.scan_start = 0;
921
 922 /* If we were running an LE-only scan, change the discovery state.
 923 * If we were running both LE and BR/EDR inquiry simultaneously,
 924 * and BR/EDR inquiry is already finished, stop discovery;
 925 * otherwise BR/EDR inquiry will stop discovery when it finishes.
 926 * If we are still resolving a remote device name, do not change
 927 * the discovery state.
 928 */
929
930 if (hdev->discovery.type == DISCOV_TYPE_LE)
931 goto discov_stopped;
932
933 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
934 return;
935
936 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
937 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
938 hdev->discovery.state != DISCOVERY_RESOLVING)
939 goto discov_stopped;
940
941 return;
942 }
943
944 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
945 HCI_CMD_TIMEOUT, &status);
946 if (status) {
947 BT_ERR("Inquiry failed: status 0x%02x", status);
948 goto discov_stopped;
949 }
950
951 return;
952
953discov_stopped:
954 hci_dev_lock(hdev);
955 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
956 hci_dev_unlock(hdev);
957}
958
959static int le_scan_restart(struct hci_request *req, unsigned long opt)
960{
961 struct hci_dev *hdev = req->hdev;
962 struct hci_cp_le_set_scan_enable cp;
963
964 /* If controller is not scanning we are done. */
965 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
966 return 0;
967
968 hci_req_add_le_scan_disable(req);
969
970 memset(&cp, 0, sizeof(cp));
971 cp.enable = LE_SCAN_ENABLE;
972 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
973 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
974
975 return 0;
976}
977
978static void le_scan_restart_work(struct work_struct *work)
979{
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 le_scan_restart.work);
982 unsigned long timeout, duration, scan_start, now;
983 u8 status;
984
985 BT_DBG("%s", hdev->name);
986
987 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
988 if (status) {
989 BT_ERR("Failed to restart LE scan: status %d", status);
990 return;
991 }
992
993 hci_dev_lock(hdev);
994
995 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
996 !hdev->discovery.scan_start)
997 goto unlock;
998
 999 /* When the scan was started, hdev->le_scan_disable was queued to
1000 * run 'duration' after scan_start. During scan restart this work
1001 * has been canceled, and we need to queue it again with the proper
1002 * timeout, to make sure that the scan does not run indefinitely.
1003 */
1004 duration = hdev->discovery.scan_duration;
1005 scan_start = hdev->discovery.scan_start;
1006 now = jiffies;
1007 if (now - scan_start <= duration) {
1008 int elapsed;
1009
1010 if (now >= scan_start)
1011 elapsed = now - scan_start;
1012 else
1013 elapsed = ULONG_MAX - scan_start + now;
1014
1015 timeout = duration - elapsed;
1016 } else {
1017 timeout = 0;
1018 }
1019
1020 queue_delayed_work(hdev->req_workqueue,
1021 &hdev->le_scan_disable, timeout);
1022
1023unlock:
1024 hci_dev_unlock(hdev);
1025}
1026
1027static void cancel_adv_timeout(struct hci_dev *hdev)
1028{
1029 if (hdev->adv_instance_timeout) {
1030 hdev->adv_instance_timeout = 0;
1031 cancel_delayed_work(&hdev->adv_instance_expire);
1032 }
1033}
1034
1035static void disable_advertising(struct hci_request *req)
1036{
1037 u8 enable = 0x00;
1038
1039 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1040}
1041
1042static int active_scan(struct hci_request *req, unsigned long opt)
1043{
1044 uint16_t interval = opt;
1045 struct hci_dev *hdev = req->hdev;
1046 struct hci_cp_le_set_scan_param param_cp;
1047 struct hci_cp_le_set_scan_enable enable_cp;
1048 u8 own_addr_type;
1049 int err;
1050
1051 BT_DBG("%s", hdev->name);
1052
1053 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1054 hci_dev_lock(hdev);
1055
1056 /* Don't let discovery abort an outgoing connection attempt
1057 * that's using directed advertising.
1058 */
1059 if (hci_lookup_le_connect(hdev)) {
1060 hci_dev_unlock(hdev);
1061 return -EBUSY;
1062 }
1063
1064 cancel_adv_timeout(hdev);
1065 hci_dev_unlock(hdev);
1066
1067 disable_advertising(req);
1068 }
1069
1070 /* If controller is scanning, it means the background scanning is
1071 * running. Thus, we should temporarily stop it in order to set the
1072 * discovery scanning parameters.
1073 */
1074 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1075 hci_req_add_le_scan_disable(req);
1076
1077 /* All active scans will be done with either a resolvable private
1078 * address (when the privacy feature has been enabled) or a
1079 * non-resolvable private address.
1080 */
1081 err = hci_update_random_address(req, true, &own_addr_type);
1082 if (err < 0)
1083 own_addr_type = ADDR_LE_DEV_PUBLIC;
1084
1085 memset(&param_cp, 0, sizeof(param_cp));
1086 param_cp.type = LE_SCAN_ACTIVE;
1087 param_cp.interval = cpu_to_le16(interval);
1088 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1089 param_cp.own_address_type = own_addr_type;
1090
1091 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1092 &param_cp);
1093
1094 memset(&enable_cp, 0, sizeof(enable_cp));
1095 enable_cp.enable = LE_SCAN_ENABLE;
1096 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1097
1098 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1099 &enable_cp);
1100
1101 return 0;
1102}
1103
1104static int interleaved_discov(struct hci_request *req, unsigned long opt)
1105{
1106 int err;
1107
1108 BT_DBG("%s", req->hdev->name);
1109
1110 err = active_scan(req, opt);
1111 if (err)
1112 return err;
1113
1114 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
1115}
1116
1117static void start_discovery(struct hci_dev *hdev, u8 *status)
1118{
1119 unsigned long timeout;
1120
1121 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
1122
1123 switch (hdev->discovery.type) {
1124 case DISCOV_TYPE_BREDR:
1125 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
1126 hci_req_sync(hdev, bredr_inquiry,
1127 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
1128 status);
1129 return;
1130 case DISCOV_TYPE_INTERLEAVED:
1131 /* When running simultaneous discovery, the LE scanning time
1132 * should occupy the whole discovery time since BR/EDR inquiry
1133 * and LE scanning are scheduled by the controller.
1134 *
1135 * For interleaved discovery, in comparison, BR/EDR inquiry
1136 * and LE scanning are done sequentially with separate
1137 * timeouts.
1138 */
1139 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
1140 &hdev->quirks)) {
1141 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
1142 /* During simultaneous discovery, we double LE scan
1143 * interval. We must leave some time for the controller
1144 * to do BR/EDR inquiry.
1145 */
1146 hci_req_sync(hdev, interleaved_discov,
1147 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
1148 status);
1149 break;
1150 }
1151
1152 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
1153 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
1154 HCI_CMD_TIMEOUT, status);
1155 break;
1156 case DISCOV_TYPE_LE:
1157 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
1158 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
1159 HCI_CMD_TIMEOUT, status);
1160 break;
1161 default:
1162 *status = HCI_ERROR_UNSPECIFIED;
1163 return;
1164 }
1165
1166 if (*status)
1167 return;
1168
1169 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
1170
1171 /* When service discovery is used and the controller has a
1172 * strict duplicate filter, it is important to remember the
1173 * start and duration of the scan. This is required for
1174 * restarting scanning during the discovery phase.
1175 */
1176 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
1177 hdev->discovery.result_filtering) {
1178 hdev->discovery.scan_start = jiffies;
1179 hdev->discovery.scan_duration = timeout;
1180 }
1181
1182 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
1183 timeout);
1184}
1185
1186bool hci_req_stop_discovery(struct hci_request *req)
1187{
1188 struct hci_dev *hdev = req->hdev;
1189 struct discovery_state *d = &hdev->discovery;
1190 struct hci_cp_remote_name_req_cancel cp;
1191 struct inquiry_entry *e;
1192 bool ret = false;
1193
1194 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
1195
1196 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
1197 if (test_bit(HCI_INQUIRY, &hdev->flags))
1198 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1199
1200 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1201 cancel_delayed_work(&hdev->le_scan_disable);
1202 hci_req_add_le_scan_disable(req);
1203 }
1204
1205 ret = true;
1206 } else {
1207 /* Passive scanning */
1208 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1209 hci_req_add_le_scan_disable(req);
1210 ret = true;
1211 }
1212 }
1213
1214 /* No further actions needed for LE-only discovery */
1215 if (d->type == DISCOV_TYPE_LE)
1216 return ret;
1217
1218 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
1219 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1220 NAME_PENDING);
1221 if (!e)
1222 return ret;
1223
1224 bacpy(&cp.bdaddr, &e->data.bdaddr);
1225 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1226 &cp);
1227 ret = true;
1228 }
1229
1230 return ret;
1231}
1232
1233static int stop_discovery(struct hci_request *req, unsigned long opt)
1234{
1235 hci_dev_lock(req->hdev);
1236 hci_req_stop_discovery(req);
1237 hci_dev_unlock(req->hdev);
1238
1239 return 0;
1240}
1241
1242static void discov_update(struct work_struct *work)
1243{
1244 struct hci_dev *hdev = container_of(work, struct hci_dev,
1245 discov_update);
1246 u8 status = 0;
1247
1248 switch (hdev->discovery.state) {
1249 case DISCOVERY_STARTING:
1250 start_discovery(hdev, &status);
1251 mgmt_start_discovery_complete(hdev, status);
1252 if (status)
1253 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1254 else
1255 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1256 break;
1257 case DISCOVERY_STOPPING:
1258 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
1259 mgmt_stop_discovery_complete(hdev, status);
1260 if (!status)
1261 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1262 break;
1263 case DISCOVERY_STOPPED:
1264 default:
1265 return;
1266 }
1267}
1268
1269void hci_request_setup(struct hci_dev *hdev)
1270{
1271 INIT_WORK(&hdev->discov_update, discov_update);
1272 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
1273 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1274 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
1275}
1276
1277void hci_request_cancel_all(struct hci_dev *hdev)
1278{
1279 hci_req_sync_cancel(hdev, ENODEV);
1280
1281 cancel_work_sync(&hdev->discov_update);
1282 cancel_work_sync(&hdev->bg_scan_update);
1283 cancel_delayed_work_sync(&hdev->le_scan_disable);
1284 cancel_delayed_work_sync(&hdev->le_scan_restart);
1285}
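
The new machinery runs a request-builder callback and sleeps until the controller answers; note that -ENODATA from an empty command queue is reported as success, which matters for conditional builders such as update_bg_scan() above. A hypothetical caller sketch (read_buffer_size() and example_caller() are illustrative names, not part of this patch):

	static int read_buffer_size(struct hci_request *req, unsigned long opt)
	{
		/* Builders only queue commands; a conditional builder may
		 * legitimately queue nothing, which hci_req_sync() then
		 * reports as success (the -ENODATA case above).
		 */
		hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
		return 0;
	}

	static void example_caller(struct hci_dev *hdev)
	{
		u8 status;
		int err;

		err = hci_req_sync(hdev, read_buffer_size, 0, HCI_CMD_TIMEOUT,
				   &status);
		if (err)
			BT_ERR("Request failed: err %d status 0x%2.2x",
			       err, status);
	}
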
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 25c7f1305dcb..6b9e59f7f7a9 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -20,6 +20,9 @@
20 SOFTWARE IS DISCLAIMED. 20 SOFTWARE IS DISCLAIMED.
21*/ 21*/
22 22
23#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
24#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
25
23struct hci_request { 26struct hci_request {
24 struct hci_dev *hdev; 27 struct hci_dev *hdev;
25 struct sk_buff_head cmd_q; 28 struct sk_buff_head cmd_q;
@@ -41,21 +44,37 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
41 hci_req_complete_t *req_complete, 44 hci_req_complete_t *req_complete,
42 hci_req_complete_skb_t *req_complete_skb); 45 hci_req_complete_skb_t *req_complete_skb);
43 46
47int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
48 unsigned long opt),
49 unsigned long opt, u32 timeout, u8 *hci_status);
50int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
51 unsigned long opt),
52 unsigned long opt, u32 timeout, u8 *hci_status);
53void hci_req_sync_cancel(struct hci_dev *hdev, int err);
54
44struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 55struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
45 const void *param); 56 const void *param);
46 57
47void hci_req_add_le_scan_disable(struct hci_request *req); 58void hci_req_add_le_scan_disable(struct hci_request *req);
48void hci_req_add_le_passive_scan(struct hci_request *req); 59void hci_req_add_le_passive_scan(struct hci_request *req);
49 60
61/* Returns true if HCI commands were queued */
62bool hci_req_stop_discovery(struct hci_request *req);
63
50void hci_update_page_scan(struct hci_dev *hdev); 64void hci_update_page_scan(struct hci_dev *hdev);
51void __hci_update_page_scan(struct hci_request *req); 65void __hci_update_page_scan(struct hci_request *req);
52 66
53int hci_update_random_address(struct hci_request *req, bool require_privacy, 67int hci_update_random_address(struct hci_request *req, bool require_privacy,
54 u8 *own_addr_type); 68 u8 *own_addr_type);
55 69
56void hci_update_background_scan(struct hci_dev *hdev);
57void __hci_update_background_scan(struct hci_request *req);
58
59int hci_abort_conn(struct hci_conn *conn, u8 reason); 70int hci_abort_conn(struct hci_conn *conn, u8 reason);
60void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, 71void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
61 u8 reason); 72 u8 reason);
73
74static inline void hci_update_background_scan(struct hci_dev *hdev)
75{
76 queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
77}
78
79void hci_request_setup(struct hci_dev *hdev);
80void hci_request_cancel_all(struct hci_dev *hdev);
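
With these header changes, hci_update_background_scan() callers no longer build or run requests themselves; everything funnels through hdev->req_workqueue. The setup/teardown pair appears to bracket the device lifetime, a minimal sketch of the assumed pairing (the alloc-side hook is visible in the hci_core.c hunk above; the close-side call is inferred):

	hci_request_setup(hdev);	/* from hci_alloc_dev() */
	/* ... device is in use ... */
	hci_request_cancel_all(hdev);	/* presumably from the close path */
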
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b1eb8c09a660..41f579ba447b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -26,6 +26,8 @@
26 26
27#include <linux/export.h> 27#include <linux/export.h>
28#include <asm/unaligned.h> 28#include <asm/unaligned.h>
29#include <generated/compile.h>
30#include <generated/utsrelease.h>
29 31
30#include <net/bluetooth/bluetooth.h> 32#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h> 33#include <net/bluetooth/hci_core.h>
@@ -120,13 +122,13 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
120 /* Apply filter */ 122 /* Apply filter */
121 flt = &hci_pi(sk)->filter; 123 flt = &hci_pi(sk)->filter;
122 124
123 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS; 125 flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
124 126
125 if (!test_bit(flt_type, &flt->type_mask)) 127 if (!test_bit(flt_type, &flt->type_mask))
126 return true; 128 return true;
127 129
128 /* Extra filter for event packets only */ 130 /* Extra filter for event packets only */
129 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT) 131 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
130 return false; 132 return false;
131 133
132 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); 134 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
@@ -170,19 +172,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
170 continue; 172 continue;
171 173
172 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { 174 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
173 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && 175 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
174 bt_cb(skb)->pkt_type != HCI_EVENT_PKT && 176 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
175 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && 177 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
176 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) 178 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
177 continue; 179 continue;
178 if (is_filtered_packet(sk, skb)) 180 if (is_filtered_packet(sk, skb))
179 continue; 181 continue;
180 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { 182 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
181 if (!bt_cb(skb)->incoming) 183 if (!bt_cb(skb)->incoming)
182 continue; 184 continue;
183 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT && 185 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
184 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && 186 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
185 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) 187 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
186 continue; 188 continue;
187 } else { 189 } else {
188 /* Don't send frame to other channel types */ 190 /* Don't send frame to other channel types */
@@ -196,7 +198,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
196 continue; 198 continue;
197 199
198 /* Put type byte before the data */ 200 /* Put type byte before the data */
199 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1); 201 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
200 } 202 }
201 203
202 nskb = skb_clone(skb_copy, GFP_ATOMIC); 204 nskb = skb_clone(skb_copy, GFP_ATOMIC);
@@ -262,7 +264,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
262 264
263 BT_DBG("hdev %p len %d", hdev, skb->len); 265 BT_DBG("hdev %p len %d", hdev, skb->len);
264 266
265 switch (bt_cb(skb)->pkt_type) { 267 switch (hci_skb_pkt_type(skb)) {
266 case HCI_COMMAND_PKT: 268 case HCI_COMMAND_PKT:
267 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT); 269 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
268 break; 270 break;
@@ -294,7 +296,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
294 return; 296 return;
295 297
296 /* Put header before the data */ 298 /* Put header before the data */
297 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE); 299 hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
298 hdr->opcode = opcode; 300 hdr->opcode = opcode;
299 hdr->index = cpu_to_le16(hdev->id); 301 hdr->index = cpu_to_le16(hdev->id);
300 hdr->len = cpu_to_le16(skb->len); 302 hdr->len = cpu_to_le16(skb->len);
@@ -375,7 +377,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
375 377
376 __net_timestamp(skb); 378 __net_timestamp(skb);
377 379
378 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE); 380 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
379 hdr->opcode = opcode; 381 hdr->opcode = opcode;
380 hdr->index = cpu_to_le16(hdev->id); 382 hdr->index = cpu_to_le16(hdev->id);
381 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); 383 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
@@ -383,6 +385,29 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
383 return skb; 385 return skb;
384} 386}
385 387
388static void send_monitor_note(struct sock *sk, const char *text)
389{
390 size_t len = strlen(text);
391 struct hci_mon_hdr *hdr;
392 struct sk_buff *skb;
393
394 skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
395 if (!skb)
396 return;
397
398 strcpy(skb_put(skb, len + 1), text);
399
400 __net_timestamp(skb);
401
402 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
403 hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
404 hdr->index = cpu_to_le16(HCI_DEV_NONE);
405 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
406
407 if (sock_queue_rcv_skb(sk, skb))
408 kfree_skb(skb);
409}
410
386static void send_monitor_replay(struct sock *sk) 411static void send_monitor_replay(struct sock *sk)
387{ 412{
388 struct hci_dev *hdev; 413 struct hci_dev *hdev;
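
send_monitor_note() frames a free-form NUL-terminated string as an HCI_MON_SYSTEM_NOTE record with no device index; user-space monitors such as btmon print these verbatim. With the bind-path calls added below, attaching a monitor should produce output roughly like the following (version strings illustrative):

	= Note: Linux version 4.5.0 (x86_64)
	= Note: Bluetooth subsystem version 2.21
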
@@ -436,18 +461,18 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
436 if (!skb) 461 if (!skb)
437 return; 462 return;
438 463
439 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE); 464 hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
440 hdr->evt = HCI_EV_STACK_INTERNAL; 465 hdr->evt = HCI_EV_STACK_INTERNAL;
441 hdr->plen = sizeof(*ev) + dlen; 466 hdr->plen = sizeof(*ev) + dlen;
442 467
443 ev = (void *) skb_put(skb, sizeof(*ev) + dlen); 468 ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
444 ev->type = type; 469 ev->type = type;
445 memcpy(ev->data, data, dlen); 470 memcpy(ev->data, data, dlen);
446 471
447 bt_cb(skb)->incoming = 1; 472 bt_cb(skb)->incoming = 1;
448 __net_timestamp(skb); 473 __net_timestamp(skb);
449 474
450 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 475 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
451 hci_send_to_sock(hdev, skb); 476 hci_send_to_sock(hdev, skb);
452 kfree_skb(skb); 477 kfree_skb(skb);
453} 478}
@@ -653,20 +678,20 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
653 return -EOPNOTSUPP; 678 return -EOPNOTSUPP;
654 679
655 case HCIGETCONNINFO: 680 case HCIGETCONNINFO:
656 return hci_get_conn_info(hdev, (void __user *) arg); 681 return hci_get_conn_info(hdev, (void __user *)arg);
657 682
658 case HCIGETAUTHINFO: 683 case HCIGETAUTHINFO:
659 return hci_get_auth_info(hdev, (void __user *) arg); 684 return hci_get_auth_info(hdev, (void __user *)arg);
660 685
661 case HCIBLOCKADDR: 686 case HCIBLOCKADDR:
662 if (!capable(CAP_NET_ADMIN)) 687 if (!capable(CAP_NET_ADMIN))
663 return -EPERM; 688 return -EPERM;
664 return hci_sock_blacklist_add(hdev, (void __user *) arg); 689 return hci_sock_blacklist_add(hdev, (void __user *)arg);
665 690
666 case HCIUNBLOCKADDR: 691 case HCIUNBLOCKADDR:
667 if (!capable(CAP_NET_ADMIN)) 692 if (!capable(CAP_NET_ADMIN))
668 return -EPERM; 693 return -EPERM;
669 return hci_sock_blacklist_del(hdev, (void __user *) arg); 694 return hci_sock_blacklist_del(hdev, (void __user *)arg);
670 } 695 }
671 696
672 return -ENOIOCTLCMD; 697 return -ENOIOCTLCMD;
@@ -675,7 +700,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
675static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, 700static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
676 unsigned long arg) 701 unsigned long arg)
677{ 702{
678 void __user *argp = (void __user *) arg; 703 void __user *argp = (void __user *)arg;
679 struct sock *sk = sock->sk; 704 struct sock *sk = sock->sk;
680 int err; 705 int err;
681 706
@@ -872,11 +897,27 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
872 */ 897 */
873 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); 898 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
874 899
900 send_monitor_note(sk, "Linux version " UTS_RELEASE
901 " (" UTS_MACHINE ")");
902 send_monitor_note(sk, "Bluetooth subsystem version "
903 BT_SUBSYS_VERSION);
875 send_monitor_replay(sk); 904 send_monitor_replay(sk);
876 905
877 atomic_inc(&monitor_promisc); 906 atomic_inc(&monitor_promisc);
878 break; 907 break;
879 908
909 case HCI_CHANNEL_LOGGING:
910 if (haddr.hci_dev != HCI_DEV_NONE) {
911 err = -EINVAL;
912 goto done;
913 }
914
915 if (!capable(CAP_NET_ADMIN)) {
916 err = -EPERM;
917 goto done;
918 }
919 break;
920
880 default: 921 default:
881 if (!hci_mgmt_chan_find(haddr.hci_channel)) { 922 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
882 err = -EINVAL; 923 err = -EINVAL;
@@ -926,7 +967,7 @@ done:
926static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, 967static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
927 int *addr_len, int peer) 968 int *addr_len, int peer)
928{ 969{
929 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; 970 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
930 struct sock *sk = sock->sk; 971 struct sock *sk = sock->sk;
931 struct hci_dev *hdev; 972 struct hci_dev *hdev;
932 int err = 0; 973 int err = 0;
@@ -991,8 +1032,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
991 } 1032 }
992} 1033}
993 1034
994static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 1035static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
995 int flags) 1036 size_t len, int flags)
996{ 1037{
997 int noblock = flags & MSG_DONTWAIT; 1038 int noblock = flags & MSG_DONTWAIT;
998 struct sock *sk = sock->sk; 1039 struct sock *sk = sock->sk;
@@ -1004,6 +1045,9 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1004 if (flags & MSG_OOB) 1045 if (flags & MSG_OOB)
1005 return -EOPNOTSUPP; 1046 return -EOPNOTSUPP;
1006 1047
1048 if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1049 return -EOPNOTSUPP;
1050
1007 if (sk->sk_state == BT_CLOSED) 1051 if (sk->sk_state == BT_CLOSED)
1008 return 0; 1052 return 0;
1009 1053
@@ -1150,6 +1194,90 @@ done:
1150 return err; 1194 return err;
1151} 1195}
1152 1196
1197static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1198{
1199 struct hci_mon_hdr *hdr;
1200 struct sk_buff *skb;
1201 struct hci_dev *hdev;
1202 u16 index;
1203 int err;
1204
1205 /* The logging frame consists at minimum of the standard header,
1206 * the priority byte, the ident length byte and at least one string
1207 * terminator NUL byte. Anything shorter is an invalid packet.
1208 */
1209 if (len < sizeof(*hdr) + 3)
1210 return -EINVAL;
1211
1212 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1213 if (!skb)
1214 return err;
1215
1216 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1217 err = -EFAULT;
1218 goto drop;
1219 }
1220
1221 hdr = (void *)skb->data;
1222
1223 if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1224 err = -EINVAL;
1225 goto drop;
1226 }
1227
1228 if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1229 __u8 priority = skb->data[sizeof(*hdr)];
1230 __u8 ident_len = skb->data[sizeof(*hdr) + 1];
1231
1232 /* Only priorities 0-7 are valid; any other value makes
1233 * the packet invalid.
1234 *
1235 * The priority byte is followed by an ident length byte and
1236 * the NUL-terminated ident string. Check that the ident
1237 * length does not overflow the packet and also that the
1238 * ident string itself is NUL terminated. In case the ident
1239 * length is zero, the length byte itself doubles as the
1240 * NUL terminator.
1241 *
1242 * The message follows the ident string (if present) and
1243 * must be NUL terminated. Otherwise the packet is invalid.
1244 */
1245 if (priority > 7 || skb->data[len - 1] != 0x00 ||
1246 ident_len > len - sizeof(*hdr) - 3 ||
1247 skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1248 err = -EINVAL;
1249 goto drop;
1250 }
1251 } else {
1252 err = -EINVAL;
1253 goto drop;
1254 }
1255
1256 index = __le16_to_cpu(hdr->index);
1257
1258 if (index != MGMT_INDEX_NONE) {
1259 hdev = hci_dev_get(index);
1260 if (!hdev) {
1261 err = -ENODEV;
1262 goto drop;
1263 }
1264 } else {
1265 hdev = NULL;
1266 }
1267
1268 hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1269
1270 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1271 err = len;
1272
1273 if (hdev)
1274 hci_dev_put(hdev);
1275
1276drop:
1277 kfree_skb(skb);
1278 return err;
1279}
1280
1153static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, 1281static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1154 size_t len) 1282 size_t len)
1155{ 1283{
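
The checks above pin down the wire format for HCI_CHANNEL_LOGGING writes: a monitor header with opcode 0x0000, one priority byte (0-7), an ident length byte that counts the ident's NUL terminator, the NUL-terminated ident, then a NUL-terminated message. A hypothetical user-space sketch of a minimal valid frame (buffer layout only; binding the socket to (HCI_DEV_NONE, HCI_CHANNEL_LOGGING) with CAP_NET_ADMIN, per the bind hunk above, is omitted):

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	struct hci_mon_hdr {		/* mirrors the kernel's monitor header */
		uint16_t opcode;
		uint16_t index;
		uint16_t len;
	} __attribute__((packed));

	/* Caller must provide a buffer of at least sizeof(struct
	 * hci_mon_hdr) + 2 + sizeof(ident) + sizeof(msg) bytes.
	 */
	static size_t build_log_frame(unsigned char *buf)
	{
		static const char ident[] = "example";		/* illustrative */
		static const char msg[] = "hello world";	/* illustrative */
		struct hci_mon_hdr *hdr = (void *)buf;
		unsigned char *p = buf + sizeof(*hdr);

		hdr->opcode = htole16(0x0000);	/* rewritten to HCI_MON_USER_LOGGING */
		hdr->index = htole16(0xffff);	/* MGMT_INDEX_NONE: no specific device */
		hdr->len = htole16(2 + sizeof(ident) + sizeof(msg));

		*p++ = 0x06;			/* priority; only 0-7 accepted */
		*p++ = sizeof(ident);		/* ident length, including its NUL */
		memcpy(p, ident, sizeof(ident));
		memcpy(p + sizeof(ident), msg, sizeof(msg));

		return sizeof(*hdr) + 2 + sizeof(ident) + sizeof(msg);
	}

The resulting buffer is then passed to write() or send() on the logging socket; the kernel rewrites the opcode and forwards the frame to all monitor channel listeners.
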
@@ -1179,6 +1307,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1179 case HCI_CHANNEL_MONITOR: 1307 case HCI_CHANNEL_MONITOR:
1180 err = -EOPNOTSUPP; 1308 err = -EOPNOTSUPP;
1181 goto done; 1309 goto done;
1310 case HCI_CHANNEL_LOGGING:
1311 err = hci_logging_frame(sk, msg, len);
1312 goto done;
1182 default: 1313 default:
1183 mutex_lock(&mgmt_chan_list_lock); 1314 mutex_lock(&mgmt_chan_list_lock);
1184 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); 1315 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
@@ -1211,7 +1342,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1211 goto drop; 1342 goto drop;
1212 } 1343 }
1213 1344
1214 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data); 1345 hci_skb_pkt_type(skb) = skb->data[0];
1215 skb_pull(skb, 1); 1346 skb_pull(skb, 1);
1216 1347
1217 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { 1348 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@@ -1220,16 +1351,16 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1220 * 1351 *
1221 * However check that the packet type is valid. 1352 * However check that the packet type is valid.
1222 */ 1353 */
1223 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT && 1354 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1224 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && 1355 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1225 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { 1356 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1226 err = -EINVAL; 1357 err = -EINVAL;
1227 goto drop; 1358 goto drop;
1228 } 1359 }
1229 1360
1230 skb_queue_tail(&hdev->raw_q, skb); 1361 skb_queue_tail(&hdev->raw_q, skb);
1231 queue_work(hdev->workqueue, &hdev->tx_work); 1362 queue_work(hdev->workqueue, &hdev->tx_work);
1232 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { 1363 } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1233 u16 opcode = get_unaligned_le16(skb->data); 1364 u16 opcode = get_unaligned_le16(skb->data);
1234 u16 ogf = hci_opcode_ogf(opcode); 1365 u16 ogf = hci_opcode_ogf(opcode);
1235 u16 ocf = hci_opcode_ocf(opcode); 1366 u16 ocf = hci_opcode_ocf(opcode);
@@ -1242,6 +1373,11 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1242 goto drop; 1373 goto drop;
1243 } 1374 }
1244 1375
1376 /* Since the opcode has already been extracted here, store
1377 * a copy of the value for later use by the drivers.
1378 */
1379 hci_skb_opcode(skb) = opcode;
1380
1245 if (ogf == 0x3f) { 1381 if (ogf == 0x3f) {
1246 skb_queue_tail(&hdev->raw_q, skb); 1382 skb_queue_tail(&hdev->raw_q, skb);
1247 queue_work(hdev->workqueue, &hdev->tx_work); 1383 queue_work(hdev->workqueue, &hdev->tx_work);
@@ -1249,7 +1385,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1249 /* Stand-alone HCI commands must be flagged as 1385 /* Stand-alone HCI commands must be flagged as
1250 * single-command requests. 1386 * single-command requests.
1251 */ 1387 */
1252 bt_cb(skb)->hci.req_start = true; 1388 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1253 1389
1254 skb_queue_tail(&hdev->cmd_q, skb); 1390 skb_queue_tail(&hdev->cmd_q, skb);
1255 queue_work(hdev->workqueue, &hdev->cmd_work); 1391 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1260,8 +1396,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1260 goto drop; 1396 goto drop;
1261 } 1397 }
1262 1398
1263 if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && 1399 if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1264 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { 1400 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
1265 err = -EINVAL; 1401 err = -EINVAL;
1266 goto drop; 1402 goto drop;
1267 } 1403 }
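
The stashed opcode added above spares drivers from re-parsing the command header on the way out. A hypothetical driver-side sketch (the hook name and the HCI_OP_RESET special case are illustrative, not from this patch):

	static int example_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
	{
		/* The socket layer already stored the opcode, so vendor
		 * handling can branch on it directly.
		 */
		if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT &&
		    hci_skb_opcode(skb) == HCI_OP_RESET) {
			/* e.g. reset transport-side state here */
		}

		return 0;	/* a real driver would hand off to its transport */
	}
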
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 66e8b6ee19a5..39a5149f3010 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -6538,8 +6538,6 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6538static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 6538static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6539 struct sk_buff *skb) 6539 struct sk_buff *skb)
6540{ 6540{
6541 int err = 0;
6542
6543 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, 6541 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6544 chan->rx_state); 6542 chan->rx_state);
6545 6543
@@ -6570,7 +6568,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6570 chan->last_acked_seq = control->txseq; 6568 chan->last_acked_seq = control->txseq;
6571 chan->expected_tx_seq = __next_seq(chan, control->txseq); 6569 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6572 6570
6573 return err; 6571 return 0;
6574} 6572}
6575 6573
6576static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 6574static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -7113,8 +7111,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7113 chan->dcid = cid; 7111 chan->dcid = cid;
7114 7112
7115 if (bdaddr_type_is_le(dst_type)) { 7113 if (bdaddr_type_is_le(dst_type)) {
7116 u8 role;
7117
7118 /* Convert from L2CAP channel address type to HCI address type 7114 /* Convert from L2CAP channel address type to HCI address type
7119 */ 7115 */
7120 if (dst_type == BDADDR_LE_PUBLIC) 7116 if (dst_type == BDADDR_LE_PUBLIC)
@@ -7123,14 +7119,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7123 dst_type = ADDR_LE_DEV_RANDOM; 7119 dst_type = ADDR_LE_DEV_RANDOM;
7124 7120
7125 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 7121 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7126 role = HCI_ROLE_SLAVE; 7122 hcon = hci_connect_le(hdev, dst, dst_type,
7123 chan->sec_level,
7124 HCI_LE_CONN_TIMEOUT,
7125 HCI_ROLE_SLAVE);
7127 else 7126 else
7128 role = HCI_ROLE_MASTER; 7127 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7128 chan->sec_level,
7129 HCI_LE_CONN_TIMEOUT);
7129 7130
7130 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7131 chan->sec_level,
7132 HCI_LE_CONN_TIMEOUT,
7133 role);
7134 } else { 7131 } else {
7135 u8 auth_type = l2cap_get_auth_type(chan); 7132 u8 auth_type = l2cap_get_auth_type(chan);
7136 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); 7133 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7f22119276f3..3d9d2e4839c5 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
38#include "mgmt_util.h" 38#include "mgmt_util.h"
39 39
40#define MGMT_VERSION 1 40#define MGMT_VERSION 1
41#define MGMT_REVISION 10 41#define MGMT_REVISION 11
42 42
43static const u16 mgmt_commands[] = { 43static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST, 44 MGMT_OP_READ_INDEX_LIST,
@@ -102,6 +102,7 @@ static const u16 mgmt_commands[] = {
102 MGMT_OP_READ_ADV_FEATURES, 102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING, 103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING, 104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
105}; 106};
106 107
107static const u16 mgmt_events[] = { 108static const u16 mgmt_events[] = {
@@ -1416,49 +1417,6 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1416 } 1417 }
1417} 1418}
1418 1419
1419static bool hci_stop_discovery(struct hci_request *req)
1420{
1421 struct hci_dev *hdev = req->hdev;
1422 struct hci_cp_remote_name_req_cancel cp;
1423 struct inquiry_entry *e;
1424
1425 switch (hdev->discovery.state) {
1426 case DISCOVERY_FINDING:
1427 if (test_bit(HCI_INQUIRY, &hdev->flags))
1428 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1429
1430 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1431 cancel_delayed_work(&hdev->le_scan_disable);
1432 hci_req_add_le_scan_disable(req);
1433 }
1434
1435 return true;
1436
1437 case DISCOVERY_RESOLVING:
1438 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1439 NAME_PENDING);
1440 if (!e)
1441 break;
1442
1443 bacpy(&cp.bdaddr, &e->data.bdaddr);
1444 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1445 &cp);
1446
1447 return true;
1448
1449 default:
1450 /* Passive scanning */
1451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1452 hci_req_add_le_scan_disable(req);
1453 return true;
1454 }
1455
1456 break;
1457 }
1458
1459 return false;
1460}
1461
1462static void advertising_added(struct sock *sk, struct hci_dev *hdev, 1420static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1463 u8 instance) 1421 u8 instance)
1464{ 1422{
@@ -1636,7 +1594,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1636 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) 1594 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1637 disable_advertising(&req); 1595 disable_advertising(&req);
1638 1596
1639 discov_stopped = hci_stop_discovery(&req); 1597 discov_stopped = hci_req_stop_discovery(&req);
1640 1598
1641 list_for_each_entry(conn, &hdev->conn_hash.list, list) { 1599 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1642 /* 0x15 == Terminated due to Power Off */ 1600 /* 0x15 == Terminated due to Power Off */
@@ -2510,8 +2468,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2510 hci_req_init(&req, hdev); 2468 hci_req_init(&req, hdev);
2511 update_adv_data(&req); 2469 update_adv_data(&req);
2512 update_scan_rsp_data(&req); 2470 update_scan_rsp_data(&req);
2513 __hci_update_background_scan(&req);
2514 hci_req_run(&req, NULL); 2471 hci_req_run(&req, NULL);
2472 hci_update_background_scan(hdev);
2515 } 2473 }
2516 2474
2517unlock: 2475unlock:
@@ -3561,8 +3519,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3561 3519
3562 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, 3520 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
3563 addr_type, sec_level, 3521 addr_type, sec_level,
3564 HCI_LE_CONN_TIMEOUT, 3522 HCI_LE_CONN_TIMEOUT);
3565 HCI_ROLE_MASTER);
3566 } 3523 }
3567 3524
3568 if (IS_ERR(conn)) { 3525 if (IS_ERR(conn)) {
@@ -4164,145 +4121,9 @@ done:
4164 return err; 4121 return err;
4165} 4122}
4166 4123
4167static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status) 4124void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4168{
4169 struct hci_dev *hdev = req->hdev;
4170 struct hci_cp_inquiry cp;
4171 /* General inquiry access code (GIAC) */
4172 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4173
4174 *status = mgmt_bredr_support(hdev);
4175 if (*status)
4176 return false;
4177
4178 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4179 *status = MGMT_STATUS_BUSY;
4180 return false;
4181 }
4182
4183 hci_inquiry_cache_flush(hdev);
4184
4185 memset(&cp, 0, sizeof(cp));
4186 memcpy(&cp.lap, lap, sizeof(cp.lap));
4187 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4188
4189 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
4190
4191 return true;
4192}
4193
4194static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4195{
4196 struct hci_dev *hdev = req->hdev;
4197 struct hci_cp_le_set_scan_param param_cp;
4198 struct hci_cp_le_set_scan_enable enable_cp;
4199 u8 own_addr_type;
4200 int err;
4201
4202 *status = mgmt_le_support(hdev);
4203 if (*status)
4204 return false;
4205
4206 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4207 /* Don't let discovery abort an outgoing connection attempt
4208 * that's using directed advertising.
4209 */
4210 if (hci_lookup_le_connect(hdev)) {
4211 *status = MGMT_STATUS_REJECTED;
4212 return false;
4213 }
4214
4215 cancel_adv_timeout(hdev);
4216 disable_advertising(req);
4217 }
4218
4219 /* If controller is scanning, it means the background scanning is
4220 * running. Thus, we should temporarily stop it in order to set the
4221 * discovery scanning parameters.
4222 */
4223 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4224 hci_req_add_le_scan_disable(req);
4225
4226 /* All active scans will be done with either a resolvable private
4227 * address (when privacy feature has been enabled) or non-resolvable
4228 * private address.
4229 */
4230 err = hci_update_random_address(req, true, &own_addr_type);
4231 if (err < 0) {
4232 *status = MGMT_STATUS_FAILED;
4233 return false;
4234 }
4235
4236 memset(&param_cp, 0, sizeof(param_cp));
4237 param_cp.type = LE_SCAN_ACTIVE;
4238 param_cp.interval = cpu_to_le16(interval);
4239 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4240 param_cp.own_address_type = own_addr_type;
4241
4242 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4243 &param_cp);
4244
4245 memset(&enable_cp, 0, sizeof(enable_cp));
4246 enable_cp.enable = LE_SCAN_ENABLE;
4247 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4248
4249 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
4250 &enable_cp);
4251
4252 return true;
4253}
4254
4255static bool trigger_discovery(struct hci_request *req, u8 *status)
4256{
4257 struct hci_dev *hdev = req->hdev;
4258
4259 switch (hdev->discovery.type) {
4260 case DISCOV_TYPE_BREDR:
4261 if (!trigger_bredr_inquiry(req, status))
4262 return false;
4263 break;
4264
4265 case DISCOV_TYPE_INTERLEAVED:
4266 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4267 &hdev->quirks)) {
4268 /* During simultaneous discovery, we double LE scan
4269 * interval. We must leave some time for the controller
4270 * to do BR/EDR inquiry.
4271 */
4272 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4273 status))
4274 return false;
4275
4276 if (!trigger_bredr_inquiry(req, status))
4277 return false;
4278
4279 return true;
4280 }
4281
4282 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4283 *status = MGMT_STATUS_NOT_SUPPORTED;
4284 return false;
4285 }
4286 /* fall through */
4287
4288 case DISCOV_TYPE_LE:
4289 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4290 return false;
4291 break;
4292
4293 default:
4294 *status = MGMT_STATUS_INVALID_PARAMS;
4295 return false;
4296 }
4297
4298 return true;
4299}
4300
4301static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4302 u16 opcode)
4303{ 4125{
4304 struct mgmt_pending_cmd *cmd; 4126 struct mgmt_pending_cmd *cmd;
4305 unsigned long timeout;
4306 4127
4307 BT_DBG("status %d", status); 4128 BT_DBG("status %d", status);
4308 4129
@@ -4317,62 +4138,34 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4317 mgmt_pending_remove(cmd); 4138 mgmt_pending_remove(cmd);
4318 } 4139 }
4319 4140
4320 if (status) { 4141 hci_dev_unlock(hdev);
4321 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 4142}
4322 goto unlock;
4323 }
4324
4325 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4326 4143
4327 /* If the scan involves LE scan, pick proper timeout to schedule 4144static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4328 * hdev->le_scan_disable that will stop it. 4145 uint8_t *mgmt_status)
4329 */ 4146{
4330 switch (hdev->discovery.type) { 4147 switch (type) {
4331 case DISCOV_TYPE_LE: 4148 case DISCOV_TYPE_LE:
4332 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 4149 *mgmt_status = mgmt_le_support(hdev);
4150 if (*mgmt_status)
4151 return false;
4333 break; 4152 break;
4334 case DISCOV_TYPE_INTERLEAVED: 4153 case DISCOV_TYPE_INTERLEAVED:
4335 /* When running simultaneous discovery, the LE scanning time 4154 *mgmt_status = mgmt_le_support(hdev);
4336 * should occupy the whole discovery time since BR/EDR inquiry 4155 if (*mgmt_status)
4337 * and LE scanning are scheduled by the controller. 4156 return false;
4338 * 4157 /* Intentional fall-through */
4339 * For interleaving discovery in comparison, BR/EDR inquiry
4340 * and LE scanning are done sequentially with separate
4341 * timeouts.
4342 */
4343 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4344 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4345 else
4346 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4347 break;
4348 case DISCOV_TYPE_BREDR: 4158 case DISCOV_TYPE_BREDR:
4349 timeout = 0; 4159 *mgmt_status = mgmt_bredr_support(hdev);
4160 if (*mgmt_status)
4161 return false;
4350 break; 4162 break;
4351 default: 4163 default:
4352 BT_ERR("Invalid discovery type %d", hdev->discovery.type); 4164 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4353 timeout = 0; 4165 return false;
4354 break;
4355 }
4356
4357 if (timeout) {
4358 /* When service discovery is used and the controller has
4359 * a strict duplicate filter, it is important to remember
4360 * the start and duration of the scan. This is required
4361 * for restarting scanning during the discovery phase.
4362 */
4363 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4364 &hdev->quirks) &&
4365 hdev->discovery.result_filtering) {
4366 hdev->discovery.scan_start = jiffies;
4367 hdev->discovery.scan_duration = timeout;
4368 }
4369
4370 queue_delayed_work(hdev->workqueue,
4371 &hdev->le_scan_disable, timeout);
4372 } 4166 }
4373 4167
4374unlock: 4168 return true;
4375 hci_dev_unlock(hdev);
4376} 4169}
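
The new discovery_type_is_valid() follows a pattern used throughout this file: return a bool for control flow and report the precise failure code through an out-parameter, so the caller can reject a command before allocating any pending state. A compilable sketch of that shape, with invented status values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_NOT_SUPPORTED 0x0c /* illustrative value */
#define STATUS_INVALID_PARAM 0x0d /* illustrative value */

static bool type_is_valid(uint8_t type, bool le_capable, uint8_t *status)
{
	switch (type) {
	case 1: /* LE-only discovery */
		if (!le_capable) {
			*status = STATUS_NOT_SUPPORTED;
			return false;
		}
		return true;
	default:
		*status = STATUS_INVALID_PARAM;
		return false;
	}
}

int main(void)
{
	uint8_t status = 0;

	/* Validate first; only allocate command state on success. */
	if (!type_is_valid(7, true, &status))
		printf("rejected early, status 0x%02x\n", status);
	return 0;
}
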
4377 4170
4378static int start_discovery(struct sock *sk, struct hci_dev *hdev, 4171static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -4380,7 +4173,6 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4380{ 4173{
4381 struct mgmt_cp_start_discovery *cp = data; 4174 struct mgmt_cp_start_discovery *cp = data;
4382 struct mgmt_pending_cmd *cmd; 4175 struct mgmt_pending_cmd *cmd;
4383 struct hci_request req;
4384 u8 status; 4176 u8 status;
4385 int err; 4177 int err;
4386 4178
@@ -4403,14 +4195,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4403 goto failed; 4195 goto failed;
4404 } 4196 }
4405 4197
4406 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len); 4198 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4407 if (!cmd) { 4199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4408 err = -ENOMEM; 4200 status, &cp->type, sizeof(cp->type));
4409 goto failed; 4201 goto failed;
4410 } 4202 }
4411 4203
4412 cmd->cmd_complete = generic_cmd_complete;
4413
4414 /* Clear the discovery filter first to free any previously 4204 /* Clear the discovery filter first to free any previously
4415 * allocated memory for the UUID list. 4205 * allocated memory for the UUID list.
4416 */ 4206 */
@@ -4419,22 +4209,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4419 hdev->discovery.type = cp->type; 4209 hdev->discovery.type = cp->type;
4420 hdev->discovery.report_invalid_rssi = false; 4210 hdev->discovery.report_invalid_rssi = false;
4421 4211
4422 hci_req_init(&req, hdev); 4212 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4423 4213 if (!cmd) {
4424 if (!trigger_discovery(&req, &status)) { 4214 err = -ENOMEM;
4425 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4426 status, &cp->type, sizeof(cp->type));
4427 mgmt_pending_remove(cmd);
4428 goto failed; 4215 goto failed;
4429 } 4216 }
4430 4217
4431 err = hci_req_run(&req, start_discovery_complete); 4218 cmd->cmd_complete = generic_cmd_complete;
4432 if (err < 0) {
4433 mgmt_pending_remove(cmd);
4434 goto failed;
4435 }
4436 4219
4437 hci_discovery_set_state(hdev, DISCOVERY_STARTING); 4220 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4221 queue_work(hdev->req_workqueue, &hdev->discov_update);
4222 err = 0;
4438 4223
4439failed: 4224failed:
4440 hci_dev_unlock(hdev); 4225 hci_dev_unlock(hdev);
@@ -4453,7 +4238,6 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4453{ 4238{
4454 struct mgmt_cp_start_service_discovery *cp = data; 4239 struct mgmt_cp_start_service_discovery *cp = data;
4455 struct mgmt_pending_cmd *cmd; 4240 struct mgmt_pending_cmd *cmd;
4456 struct hci_request req;
4457 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); 4241 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4458 u16 uuid_count, expected_len; 4242 u16 uuid_count, expected_len;
4459 u8 status; 4243 u8 status;
@@ -4502,6 +4286,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4502 goto failed; 4286 goto failed;
4503 } 4287 }
4504 4288
4289 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4290 err = mgmt_cmd_complete(sk, hdev->id,
4291 MGMT_OP_START_SERVICE_DISCOVERY,
4292 status, &cp->type, sizeof(cp->type));
4293 goto failed;
4294 }
4295
4505 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, 4296 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4506 hdev, data, len); 4297 hdev, data, len);
4507 if (!cmd) { 4298 if (!cmd) {
@@ -4534,30 +4325,16 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4534 } 4325 }
4535 } 4326 }
4536 4327
4537 hci_req_init(&req, hdev);
4538
4539 if (!trigger_discovery(&req, &status)) {
4540 err = mgmt_cmd_complete(sk, hdev->id,
4541 MGMT_OP_START_SERVICE_DISCOVERY,
4542 status, &cp->type, sizeof(cp->type));
4543 mgmt_pending_remove(cmd);
4544 goto failed;
4545 }
4546
4547 err = hci_req_run(&req, start_discovery_complete);
4548 if (err < 0) {
4549 mgmt_pending_remove(cmd);
4550 goto failed;
4551 }
4552
4553 hci_discovery_set_state(hdev, DISCOVERY_STARTING); 4328 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4329 queue_work(hdev->req_workqueue, &hdev->discov_update);
4330 err = 0;
4554 4331
4555failed: 4332failed:
4556 hci_dev_unlock(hdev); 4333 hci_dev_unlock(hdev);
4557 return err; 4334 return err;
4558} 4335}
4559 4336
4560static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode) 4337void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4561{ 4338{
4562 struct mgmt_pending_cmd *cmd; 4339 struct mgmt_pending_cmd *cmd;
4563 4340
@@ -4571,9 +4348,6 @@ static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4571 mgmt_pending_remove(cmd); 4348 mgmt_pending_remove(cmd);
4572 } 4349 }
4573 4350
4574 if (!status)
4575 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4576
4577 hci_dev_unlock(hdev); 4351 hci_dev_unlock(hdev);
4578} 4352}
4579 4353
@@ -4582,7 +4356,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4582{ 4356{
4583 struct mgmt_cp_stop_discovery *mgmt_cp = data; 4357 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4584 struct mgmt_pending_cmd *cmd; 4358 struct mgmt_pending_cmd *cmd;
4585 struct hci_request req;
4586 int err; 4359 int err;
4587 4360
4588 BT_DBG("%s", hdev->name); 4361 BT_DBG("%s", hdev->name);
@@ -4611,24 +4384,9 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4611 4384
4612 cmd->cmd_complete = generic_cmd_complete; 4385 cmd->cmd_complete = generic_cmd_complete;
4613 4386
4614 hci_req_init(&req, hdev); 4387 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4615 4388 queue_work(hdev->req_workqueue, &hdev->discov_update);
4616 hci_stop_discovery(&req); 4389 err = 0;
4617
4618 err = hci_req_run(&req, stop_discovery_complete);
4619 if (!err) {
4620 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4621 goto unlock;
4622 }
4623
4624 mgmt_pending_remove(cmd);
4625
4626 /* If no HCI commands were sent we're done */
4627 if (err == -ENODATA) {
4628 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4629 &mgmt_cp->type, sizeof(mgmt_cp->type));
4630 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4631 }
4632 4390
4633unlock: 4391unlock:
4634 hci_dev_unlock(hdev); 4392 hci_dev_unlock(hdev);
@@ -6076,10 +5834,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6076} 5834}
6077 5835
6078/* This function requires the caller holds hdev->lock */ 5836/* This function requires the caller holds hdev->lock */
6079static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr, 5837static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6080 u8 addr_type, u8 auto_connect) 5838 u8 addr_type, u8 auto_connect)
6081{ 5839{
6082 struct hci_dev *hdev = req->hdev;
6083 struct hci_conn_params *params; 5840 struct hci_conn_params *params;
6084 5841
6085 params = hci_conn_params_add(hdev, addr, addr_type); 5842 params = hci_conn_params_add(hdev, addr, addr_type);
@@ -6099,26 +5856,17 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
6099 */ 5856 */
6100 if (params->explicit_connect) 5857 if (params->explicit_connect)
6101 list_add(&params->action, &hdev->pend_le_conns); 5858 list_add(&params->action, &hdev->pend_le_conns);
6102
6103 __hci_update_background_scan(req);
6104 break; 5859 break;
6105 case HCI_AUTO_CONN_REPORT: 5860 case HCI_AUTO_CONN_REPORT:
6106 if (params->explicit_connect) 5861 if (params->explicit_connect)
6107 list_add(&params->action, &hdev->pend_le_conns); 5862 list_add(&params->action, &hdev->pend_le_conns);
6108 else 5863 else
6109 list_add(&params->action, &hdev->pend_le_reports); 5864 list_add(&params->action, &hdev->pend_le_reports);
6110 __hci_update_background_scan(req);
6111 break; 5865 break;
6112 case HCI_AUTO_CONN_DIRECT: 5866 case HCI_AUTO_CONN_DIRECT:
6113 case HCI_AUTO_CONN_ALWAYS: 5867 case HCI_AUTO_CONN_ALWAYS:
6114 if (!is_connected(hdev, addr, addr_type)) { 5868 if (!is_connected(hdev, addr, addr_type))
6115 list_add(&params->action, &hdev->pend_le_conns); 5869 list_add(&params->action, &hdev->pend_le_conns);
6116 /* If we are in scan phase of connecting, we were
6117 * already added to pend_le_conns and scanning.
6118 */
6119 if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
6120 __hci_update_background_scan(req);
6121 }
6122 break; 5870 break;
6123 } 5871 }
6124 5872
@@ -6142,31 +5890,10 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
6142 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); 5890 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6143} 5891}
6144 5892
6145static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6146{
6147 struct mgmt_pending_cmd *cmd;
6148
6149 BT_DBG("status 0x%02x", status);
6150
6151 hci_dev_lock(hdev);
6152
6153 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
6154 if (!cmd)
6155 goto unlock;
6156
6157 cmd->cmd_complete(cmd, mgmt_status(status));
6158 mgmt_pending_remove(cmd);
6159
6160unlock:
6161 hci_dev_unlock(hdev);
6162}
6163
6164static int add_device(struct sock *sk, struct hci_dev *hdev, 5893static int add_device(struct sock *sk, struct hci_dev *hdev,
6165 void *data, u16 len) 5894 void *data, u16 len)
6166{ 5895{
6167 struct mgmt_cp_add_device *cp = data; 5896 struct mgmt_cp_add_device *cp = data;
6168 struct mgmt_pending_cmd *cmd;
6169 struct hci_request req;
6170 u8 auto_conn, addr_type; 5897 u8 auto_conn, addr_type;
6171 int err; 5898 int err;
6172 5899
@@ -6183,24 +5910,15 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
6183 MGMT_STATUS_INVALID_PARAMS, 5910 MGMT_STATUS_INVALID_PARAMS,
6184 &cp->addr, sizeof(cp->addr)); 5911 &cp->addr, sizeof(cp->addr));
6185 5912
6186 hci_req_init(&req, hdev);
6187
6188 hci_dev_lock(hdev); 5913 hci_dev_lock(hdev);
6189 5914
6190 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6191 if (!cmd) {
6192 err = -ENOMEM;
6193 goto unlock;
6194 }
6195
6196 cmd->cmd_complete = addr_cmd_complete;
6197
6198 if (cp->addr.type == BDADDR_BREDR) { 5915 if (cp->addr.type == BDADDR_BREDR) {
6199		/* Only the incoming connections action is supported for now */	5916		/* Only the incoming connections action is supported for now */
6200 if (cp->action != 0x01) { 5917 if (cp->action != 0x01) {
6201 err = cmd->cmd_complete(cmd, 5918 err = mgmt_cmd_complete(sk, hdev->id,
6202 MGMT_STATUS_INVALID_PARAMS); 5919 MGMT_OP_ADD_DEVICE,
6203 mgmt_pending_remove(cmd); 5920 MGMT_STATUS_INVALID_PARAMS,
5921 &cp->addr, sizeof(cp->addr));
6204 goto unlock; 5922 goto unlock;
6205 } 5923 }
6206 5924
@@ -6209,7 +5927,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
6209 if (err) 5927 if (err)
6210 goto unlock; 5928 goto unlock;
6211 5929
6212 __hci_update_page_scan(&req); 5930 hci_update_page_scan(hdev);
6213 5931
6214 goto added; 5932 goto added;
6215 } 5933 }
@@ -6229,33 +5947,31 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
6229 * hci_conn_params_lookup. 5947 * hci_conn_params_lookup.
6230 */ 5948 */
6231 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { 5949 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6232 err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS); 5950 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6233 mgmt_pending_remove(cmd); 5951 MGMT_STATUS_INVALID_PARAMS,
5952 &cp->addr, sizeof(cp->addr));
6234 goto unlock; 5953 goto unlock;
6235 } 5954 }
6236 5955
6237 /* If the connection parameters don't exist for this device, 5956 /* If the connection parameters don't exist for this device,
6238 * they will be created and configured with defaults. 5957 * they will be created and configured with defaults.
6239 */ 5958 */
6240 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type, 5959 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6241 auto_conn) < 0) { 5960 auto_conn) < 0) {
6242 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED); 5961 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6243 mgmt_pending_remove(cmd); 5962 MGMT_STATUS_FAILED, &cp->addr,
5963 sizeof(cp->addr));
6244 goto unlock; 5964 goto unlock;
6245 } 5965 }
6246 5966
5967 hci_update_background_scan(hdev);
5968
6247added: 5969added:
6248 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); 5970 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6249 5971
6250 err = hci_req_run(&req, add_device_complete); 5972 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6251 if (err < 0) { 5973 MGMT_STATUS_SUCCESS, &cp->addr,
6252 /* ENODATA means no HCI commands were needed (e.g. if 5974 sizeof(cp->addr));
6253 * the adapter is powered off).
6254 */
6255 if (err == -ENODATA)
6256 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6257 mgmt_pending_remove(cmd);
6258 }
6259 5975
6260unlock: 5976unlock:
6261 hci_dev_unlock(hdev); 5977 hci_dev_unlock(hdev);
@@ -6273,55 +5989,25 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
6273 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); 5989 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6274} 5990}
6275 5991
6276static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6277{
6278 struct mgmt_pending_cmd *cmd;
6279
6280 BT_DBG("status 0x%02x", status);
6281
6282 hci_dev_lock(hdev);
6283
6284 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6285 if (!cmd)
6286 goto unlock;
6287
6288 cmd->cmd_complete(cmd, mgmt_status(status));
6289 mgmt_pending_remove(cmd);
6290
6291unlock:
6292 hci_dev_unlock(hdev);
6293}
6294
6295static int remove_device(struct sock *sk, struct hci_dev *hdev, 5992static int remove_device(struct sock *sk, struct hci_dev *hdev,
6296 void *data, u16 len) 5993 void *data, u16 len)
6297{ 5994{
6298 struct mgmt_cp_remove_device *cp = data; 5995 struct mgmt_cp_remove_device *cp = data;
6299 struct mgmt_pending_cmd *cmd;
6300 struct hci_request req;
6301 int err; 5996 int err;
6302 5997
6303 BT_DBG("%s", hdev->name); 5998 BT_DBG("%s", hdev->name);
6304 5999
6305 hci_req_init(&req, hdev);
6306
6307 hci_dev_lock(hdev); 6000 hci_dev_lock(hdev);
6308 6001
6309 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6310 if (!cmd) {
6311 err = -ENOMEM;
6312 goto unlock;
6313 }
6314
6315 cmd->cmd_complete = addr_cmd_complete;
6316
6317 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { 6002 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6318 struct hci_conn_params *params; 6003 struct hci_conn_params *params;
6319 u8 addr_type; 6004 u8 addr_type;
6320 6005
6321 if (!bdaddr_type_is_valid(cp->addr.type)) { 6006 if (!bdaddr_type_is_valid(cp->addr.type)) {
6322 err = cmd->cmd_complete(cmd, 6007 err = mgmt_cmd_complete(sk, hdev->id,
6323 MGMT_STATUS_INVALID_PARAMS); 6008 MGMT_OP_REMOVE_DEVICE,
6324 mgmt_pending_remove(cmd); 6009 MGMT_STATUS_INVALID_PARAMS,
6010 &cp->addr, sizeof(cp->addr));
6325 goto unlock; 6011 goto unlock;
6326 } 6012 }
6327 6013
@@ -6330,13 +6016,15 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6330 &cp->addr.bdaddr, 6016 &cp->addr.bdaddr,
6331 cp->addr.type); 6017 cp->addr.type);
6332 if (err) { 6018 if (err) {
6333 err = cmd->cmd_complete(cmd, 6019 err = mgmt_cmd_complete(sk, hdev->id,
6334 MGMT_STATUS_INVALID_PARAMS); 6020 MGMT_OP_REMOVE_DEVICE,
6335 mgmt_pending_remove(cmd); 6021 MGMT_STATUS_INVALID_PARAMS,
6022 &cp->addr,
6023 sizeof(cp->addr));
6336 goto unlock; 6024 goto unlock;
6337 } 6025 }
6338 6026
6339 __hci_update_page_scan(&req); 6027 hci_update_page_scan(hdev);
6340 6028
6341 device_removed(sk, hdev, &cp->addr.bdaddr, 6029 device_removed(sk, hdev, &cp->addr.bdaddr,
6342 cp->addr.type); 6030 cp->addr.type);
@@ -6351,33 +6039,36 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6351 * hci_conn_params_lookup. 6039 * hci_conn_params_lookup.
6352 */ 6040 */
6353 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { 6041 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6354 err = cmd->cmd_complete(cmd, 6042 err = mgmt_cmd_complete(sk, hdev->id,
6355 MGMT_STATUS_INVALID_PARAMS); 6043 MGMT_OP_REMOVE_DEVICE,
6356 mgmt_pending_remove(cmd); 6044 MGMT_STATUS_INVALID_PARAMS,
6045 &cp->addr, sizeof(cp->addr));
6357 goto unlock; 6046 goto unlock;
6358 } 6047 }
6359 6048
6360 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, 6049 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6361 addr_type); 6050 addr_type);
6362 if (!params) { 6051 if (!params) {
6363 err = cmd->cmd_complete(cmd, 6052 err = mgmt_cmd_complete(sk, hdev->id,
6364 MGMT_STATUS_INVALID_PARAMS); 6053 MGMT_OP_REMOVE_DEVICE,
6365 mgmt_pending_remove(cmd); 6054 MGMT_STATUS_INVALID_PARAMS,
6055 &cp->addr, sizeof(cp->addr));
6366 goto unlock; 6056 goto unlock;
6367 } 6057 }
6368 6058
6369 if (params->auto_connect == HCI_AUTO_CONN_DISABLED || 6059 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6370 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { 6060 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6371 err = cmd->cmd_complete(cmd, 6061 err = mgmt_cmd_complete(sk, hdev->id,
6372 MGMT_STATUS_INVALID_PARAMS); 6062 MGMT_OP_REMOVE_DEVICE,
6373 mgmt_pending_remove(cmd); 6063 MGMT_STATUS_INVALID_PARAMS,
6064 &cp->addr, sizeof(cp->addr));
6374 goto unlock; 6065 goto unlock;
6375 } 6066 }
6376 6067
6377 list_del(&params->action); 6068 list_del(&params->action);
6378 list_del(&params->list); 6069 list_del(&params->list);
6379 kfree(params); 6070 kfree(params);
6380 __hci_update_background_scan(&req); 6071 hci_update_background_scan(hdev);
6381 6072
6382 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); 6073 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6383 } else { 6074 } else {
@@ -6385,9 +6076,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6385 struct bdaddr_list *b, *btmp; 6076 struct bdaddr_list *b, *btmp;
6386 6077
6387 if (cp->addr.type) { 6078 if (cp->addr.type) {
6388 err = cmd->cmd_complete(cmd, 6079 err = mgmt_cmd_complete(sk, hdev->id,
6389 MGMT_STATUS_INVALID_PARAMS); 6080 MGMT_OP_REMOVE_DEVICE,
6390 mgmt_pending_remove(cmd); 6081 MGMT_STATUS_INVALID_PARAMS,
6082 &cp->addr, sizeof(cp->addr));
6391 goto unlock; 6083 goto unlock;
6392 } 6084 }
6393 6085
@@ -6397,7 +6089,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6397 kfree(b); 6089 kfree(b);
6398 } 6090 }
6399 6091
6400 __hci_update_page_scan(&req); 6092 hci_update_page_scan(hdev);
6401 6093
6402 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { 6094 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6403 if (p->auto_connect == HCI_AUTO_CONN_DISABLED) 6095 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -6414,20 +6106,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
6414 6106
6415 BT_DBG("All LE connection parameters were removed"); 6107 BT_DBG("All LE connection parameters were removed");
6416 6108
6417 __hci_update_background_scan(&req); 6109 hci_update_background_scan(hdev);
6418 } 6110 }
6419 6111
6420complete: 6112complete:
6421 err = hci_req_run(&req, remove_device_complete); 6113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6422 if (err < 0) { 6114 MGMT_STATUS_SUCCESS, &cp->addr,
6423 /* ENODATA means no HCI commands were needed (e.g. if 6115 sizeof(cp->addr));
6424 * the adapter is powered off).
6425 */
6426 if (err == -ENODATA)
6427 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6428 mgmt_pending_remove(cmd);
6429 }
6430
6431unlock: 6116unlock:
6432 hci_dev_unlock(hdev); 6117 hci_dev_unlock(hdev);
6433 return err; 6118 return err;
@@ -7016,17 +6701,19 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7016 int i, cur_len; 6701 int i, cur_len;
7017 bool flags_managed = false; 6702 bool flags_managed = false;
7018 bool tx_power_managed = false; 6703 bool tx_power_managed = false;
7019 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
7020 MGMT_ADV_FLAG_MANAGED_FLAGS;
7021 6704
7022 if (is_adv_data && (adv_flags & flags_params)) { 6705 if (is_adv_data) {
7023 flags_managed = true; 6706 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7024 max_len -= 3; 6707 MGMT_ADV_FLAG_LIMITED_DISCOV |
7025 } 6708 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
6709 flags_managed = true;
6710 max_len -= 3;
6711 }
7026 6712
7027 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) { 6713 if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
7028 tx_power_managed = true; 6714 tx_power_managed = true;
7029 max_len -= 3; 6715 max_len -= 3;
6716 }
7030 } 6717 }
7031 6718
7032 if (len > max_len) 6719 if (len > max_len)
@@ -7155,6 +6842,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7155 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, 6842 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7156 status); 6843 status);
7157 6844
6845 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6847 MGMT_STATUS_INVALID_PARAMS);
6848
7158 flags = __le32_to_cpu(cp->flags); 6849 flags = __le32_to_cpu(cp->flags);
7159 timeout = __le16_to_cpu(cp->timeout); 6850 timeout = __le16_to_cpu(cp->timeout);
7160 duration = __le16_to_cpu(cp->duration); 6851 duration = __le16_to_cpu(cp->duration);
@@ -7369,6 +7060,62 @@ unlock:
7369 return err; 7060 return err;
7370} 7061}
7371 7062
7063static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
7064{
7065 u8 max_len = HCI_MAX_AD_LENGTH;
7066
7067 if (is_adv_data) {
7068 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7069 MGMT_ADV_FLAG_LIMITED_DISCOV |
7070 MGMT_ADV_FLAG_MANAGED_FLAGS))
7071 max_len -= 3;
7072
7073 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7074 max_len -= 3;
7075 }
7076
7077 return max_len;
7078}
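
tlv_data_max_len() reserves 3 octets per controller-managed entry: the flags field and the TX-power field are each encoded as a length/type/value triplet of 3 bytes, carved out of the 31-octet legacy advertising payload (HCI_MAX_AD_LENGTH). A standalone sketch of the same arithmetic, with illustrative flag bit assignments:

#include <stdint.h>
#include <stdio.h>

#define AD_MAX_LEN          31   /* legacy advertising PDU payload */
#define FLAG_DISCOV         0x01 /* illustrative bit assignments */
#define FLAG_LIMITED_DISCOV 0x02
#define FLAG_MANAGED_FLAGS  0x04
#define FLAG_TX_POWER       0x08

static uint8_t max_ad_len(uint32_t flags, int is_adv_data)
{
	uint8_t max_len = AD_MAX_LEN;

	if (is_adv_data) {
		/* Each managed entry costs a 3-byte TLV: len, type, value. */
		if (flags & (FLAG_DISCOV | FLAG_LIMITED_DISCOV |
			     FLAG_MANAGED_FLAGS))
			max_len -= 3;
		if (flags & FLAG_TX_POWER)
			max_len -= 3;
	}
	return max_len;
}

int main(void)
{
	printf("%u\n", max_ad_len(FLAG_DISCOV | FLAG_TX_POWER, 1)); /* 25 */
	return 0;
}
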
7079
7080static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7081 void *data, u16 data_len)
7082{
7083 struct mgmt_cp_get_adv_size_info *cp = data;
7084 struct mgmt_rp_get_adv_size_info rp;
7085 u32 flags, supported_flags;
7086 int err;
7087
7088 BT_DBG("%s", hdev->name);
7089
7090 if (!lmp_le_capable(hdev))
7091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7092 MGMT_STATUS_REJECTED);
7093
7094 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7095 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7096 MGMT_STATUS_INVALID_PARAMS);
7097
7098 flags = __le32_to_cpu(cp->flags);
7099
7100 /* The current implementation only supports a subset of the specified
7101 * flags.
7102 */
7103 supported_flags = get_supported_adv_flags(hdev);
7104 if (flags & ~supported_flags)
7105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7106 MGMT_STATUS_INVALID_PARAMS);
7107
7108 rp.instance = cp->instance;
7109 rp.flags = cp->flags;
7110 rp.max_adv_data_len = tlv_data_max_len(flags, true);
7111 rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
7112
7113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7114 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7115
7116 return err;
7117}
7118
7372static const struct hci_mgmt_handler mgmt_handlers[] = { 7119static const struct hci_mgmt_handler mgmt_handlers[] = {
7373 { NULL }, /* 0x0000 (no command) */ 7120 { NULL }, /* 0x0000 (no command) */
7374 { read_version, MGMT_READ_VERSION_SIZE, 7121 { read_version, MGMT_READ_VERSION_SIZE,
@@ -7456,6 +7203,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
7456 { add_advertising, MGMT_ADD_ADVERTISING_SIZE, 7203 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7457 HCI_MGMT_VAR_LEN }, 7204 HCI_MGMT_VAR_LEN },
7458 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE }, 7205 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
7206 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
7459}; 7207};
7460 7208
7461void mgmt_index_added(struct hci_dev *hdev) 7209void mgmt_index_added(struct hci_dev *hdev)
@@ -7526,9 +7274,8 @@ void mgmt_index_removed(struct hci_dev *hdev)
7526} 7274}
7527 7275
7528/* This function requires the caller holds hdev->lock */ 7276/* This function requires the caller holds hdev->lock */
7529static void restart_le_actions(struct hci_request *req) 7277static void restart_le_actions(struct hci_dev *hdev)
7530{ 7278{
7531 struct hci_dev *hdev = req->hdev;
7532 struct hci_conn_params *p; 7279 struct hci_conn_params *p;
7533 7280
7534 list_for_each_entry(p, &hdev->le_conn_params, list) { 7281 list_for_each_entry(p, &hdev->le_conn_params, list) {
@@ -7549,8 +7296,6 @@ static void restart_le_actions(struct hci_request *req)
7549 break; 7296 break;
7550 } 7297 }
7551 } 7298 }
7552
7553 __hci_update_background_scan(req);
7554} 7299}
7555 7300
7556static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode) 7301static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -7560,12 +7305,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7560 BT_DBG("status 0x%02x", status); 7305 BT_DBG("status 0x%02x", status);
7561 7306
7562 if (!status) { 7307 if (!status) {
7563 /* Register the available SMP channels (BR/EDR and LE) only 7308 restart_le_actions(hdev);
7564 * when successfully powering on the controller. This late 7309 hci_update_background_scan(hdev);
7565 * registration is required so that LE SMP can clearly
7566 * decide if the public address or static address is used.
7567 */
7568 smp_register(hdev);
7569 } 7310 }
7570 7311
7571 hci_dev_lock(hdev); 7312 hci_dev_lock(hdev);
@@ -7644,8 +7385,6 @@ static int powered_update_hci(struct hci_dev *hdev)
7644 hdev->cur_adv_instance) 7385 hdev->cur_adv_instance)
7645 schedule_adv_instance(&req, hdev->cur_adv_instance, 7386 schedule_adv_instance(&req, hdev->cur_adv_instance,
7646 true); 7387 true);
7647
7648 restart_le_actions(&req);
7649 } 7388 }
7650 7389
7651 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); 7390 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
@@ -7677,6 +7416,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
7677 return 0; 7416 return 0;
7678 7417
7679 if (powered) { 7418 if (powered) {
7419 /* Register the available SMP channels (BR/EDR and LE) only
7420 * when successfully powering on the controller. This late
7421 * registration is required so that LE SMP can clearly
7422 * decide if the public address or static address is used.
7423 */
7424 smp_register(hdev);
7425
7680 if (powered_update_hci(hdev) == 0) 7426 if (powered_update_hci(hdev) == 0)
7681 return 0; 7427 return 0;
7682 7428
@@ -8452,7 +8198,7 @@ static void restart_le_scan(struct hci_dev *hdev)
8452 hdev->discovery.scan_duration)) 8198 hdev->discovery.scan_duration))
8453 return; 8199 return;
8454 8200
8455 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart, 8201 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8456 DISCOV_LE_RESTART_DELAY); 8202 DISCOV_LE_RESTART_DELAY);
8457} 8203}
8458 8204
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index f7e8dee64fc8..5f3f64553179 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
48 48
49 p->state = state; 49 p->state = state;
50 err = switchdev_port_attr_set(p->dev, &attr); 50 err = switchdev_port_attr_set(p->dev, &attr);
51 if (err) 51 if (err && err != -EOPNOTSUPP)
52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n", 52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
53 (unsigned int) p->port_no, p->dev->name); 53 (unsigned int) p->port_no, p->dev->name);
54} 54}
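
Both bridge fixes apply the same rule for optional switchdev offload: -EOPNOTSUPP from the driver means "no hardware offload for this attribute" and is expected, so only other errors deserve a warning. A minimal sketch of that error filtering, using a stand-in for the attr-set call:

#include <errno.h>
#include <stdio.h>

/* Stand-in for switchdev_port_attr_set(); returns -EOPNOTSUPP when
 * the underlying device offers no offload for this attribute.
 */
static int port_attr_set(int attr)
{
	(void)attr;
	return -EOPNOTSUPP;
}

int main(void)
{
	int err = port_attr_set(42);

	/* The software fallback still works, so only real failures warn. */
	if (err && err != -EOPNOTSUPP)
		fprintf(stderr, "error setting offload state\n");
	return 0;
}
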
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index fa53d7a89f48..5396ff08af32 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p)
50 p->config_pending = 0; 50 p->config_pending = 0;
51 51
52 err = switchdev_port_attr_set(p->dev, &attr); 52 err = switchdev_port_attr_set(p->dev, &attr);
53 if (err) 53 if (err && err != -EOPNOTSUPP)
54 netdev_err(p->dev, "failed to set HW ageing time\n"); 54 netdev_err(p->dev, "failed to set HW ageing time\n");
55} 55}
56 56
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index ba6eb17226da..10d87753ed87 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/ceph/decode.h> 9#include <linux/ceph/decode.h>
10#include <linux/ceph/auth.h> 10#include <linux/ceph/auth.h>
11#include <linux/ceph/libceph.h>
11#include <linux/ceph/messenger.h> 12#include <linux/ceph/messenger.h>
12 13
13#include "crypto.h" 14#include "crypto.h"
@@ -279,6 +280,15 @@ bad:
279 return -EINVAL; 280 return -EINVAL;
280} 281}
281 282
283static void ceph_x_authorizer_cleanup(struct ceph_x_authorizer *au)
284{
285 ceph_crypto_key_destroy(&au->session_key);
286 if (au->buf) {
287 ceph_buffer_put(au->buf);
288 au->buf = NULL;
289 }
290}
291
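
ceph_x_authorizer_cleanup() is written to be idempotent: it NULLs au->buf after dropping the reference (and the crypto.h change below gives ceph_crypto_key_destroy() the same property), so the helper can serve the error path, the destroy path, and ceph_x_destroy() without double-free risk. A userspace sketch of the idiom:

#include <stdlib.h>

struct authorizer {
	void *key;
	void *buf;
};

/* Safe to call any number of times: every free is followed by a
 * NULL assignment, so a repeated call degenerates to free(NULL).
 */
static void authorizer_cleanup(struct authorizer *au)
{
	free(au->key);
	au->key = NULL;
	free(au->buf);
	au->buf = NULL;
}

int main(void)
{
	struct authorizer au = { malloc(8), malloc(8) };

	authorizer_cleanup(&au);
	authorizer_cleanup(&au); /* harmless repeat */
	return 0;
}
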
282static int ceph_x_build_authorizer(struct ceph_auth_client *ac, 292static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
283 struct ceph_x_ticket_handler *th, 293 struct ceph_x_ticket_handler *th,
284 struct ceph_x_authorizer *au) 294 struct ceph_x_authorizer *au)
@@ -297,7 +307,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
297 ceph_crypto_key_destroy(&au->session_key); 307 ceph_crypto_key_destroy(&au->session_key);
298 ret = ceph_crypto_key_clone(&au->session_key, &th->session_key); 308 ret = ceph_crypto_key_clone(&au->session_key, &th->session_key);
299 if (ret) 309 if (ret)
300 return ret; 310 goto out_au;
301 311
302 maxlen = sizeof(*msg_a) + sizeof(msg_b) + 312 maxlen = sizeof(*msg_a) + sizeof(msg_b) +
303 ceph_x_encrypt_buflen(ticket_blob_len); 313 ceph_x_encrypt_buflen(ticket_blob_len);
@@ -309,8 +319,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
309 if (!au->buf) { 319 if (!au->buf) {
310 au->buf = ceph_buffer_new(maxlen, GFP_NOFS); 320 au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
311 if (!au->buf) { 321 if (!au->buf) {
312 ceph_crypto_key_destroy(&au->session_key); 322 ret = -ENOMEM;
313 return -ENOMEM; 323 goto out_au;
314 } 324 }
315 } 325 }
316 au->service = th->service; 326 au->service = th->service;
@@ -340,7 +350,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
340 ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b), 350 ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
341 p, end - p); 351 p, end - p);
342 if (ret < 0) 352 if (ret < 0)
343 goto out_buf; 353 goto out_au;
344 p += ret; 354 p += ret;
345 au->buf->vec.iov_len = p - au->buf->vec.iov_base; 355 au->buf->vec.iov_len = p - au->buf->vec.iov_base;
346 dout(" built authorizer nonce %llx len %d\n", au->nonce, 356 dout(" built authorizer nonce %llx len %d\n", au->nonce,
@@ -348,9 +358,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
348 BUG_ON(au->buf->vec.iov_len > maxlen); 358 BUG_ON(au->buf->vec.iov_len > maxlen);
349 return 0; 359 return 0;
350 360
351out_buf: 361out_au:
352 ceph_buffer_put(au->buf); 362 ceph_x_authorizer_cleanup(au);
353 au->buf = NULL;
354 return ret; 363 return ret;
355} 364}
356 365
@@ -624,8 +633,7 @@ static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
624{ 633{
625 struct ceph_x_authorizer *au = (void *)a; 634 struct ceph_x_authorizer *au = (void *)a;
626 635
627 ceph_crypto_key_destroy(&au->session_key); 636 ceph_x_authorizer_cleanup(au);
628 ceph_buffer_put(au->buf);
629 kfree(au); 637 kfree(au);
630} 638}
631 639
@@ -653,8 +661,7 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
653 remove_ticket_handler(ac, th); 661 remove_ticket_handler(ac, th);
654 } 662 }
655 663
656 if (xi->auth_authorizer.buf) 664 ceph_x_authorizer_cleanup(&xi->auth_authorizer);
657 ceph_buffer_put(xi->auth_authorizer.buf);
658 665
659 kfree(ac->private); 666 kfree(ac->private);
660 ac->private = NULL; 667 ac->private = NULL;
@@ -691,8 +698,10 @@ static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
691 struct ceph_msg *msg) 698 struct ceph_msg *msg)
692{ 699{
693 int ret; 700 int ret;
694 if (!auth->authorizer) 701
702 if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
695 return 0; 703 return 0;
704
696 ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, 705 ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
697 msg, &msg->footer.sig); 706 msg, &msg->footer.sig);
698 if (ret < 0) 707 if (ret < 0)
@@ -707,8 +716,9 @@ static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
707 __le64 sig_check; 716 __le64 sig_check;
708 int ret; 717 int ret;
709 718
710 if (!auth->authorizer) 719 if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
711 return 0; 720 return 0;
721
712 ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer, 722 ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
713 msg, &sig_check); 723 msg, &sig_check);
714 if (ret < 0) 724 if (ret < 0)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 78f098a20796..bcbec33c6a14 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -245,6 +245,8 @@ enum {
245 Opt_nocrc, 245 Opt_nocrc,
246 Opt_cephx_require_signatures, 246 Opt_cephx_require_signatures,
247 Opt_nocephx_require_signatures, 247 Opt_nocephx_require_signatures,
248 Opt_cephx_sign_messages,
249 Opt_nocephx_sign_messages,
248 Opt_tcp_nodelay, 250 Opt_tcp_nodelay,
249 Opt_notcp_nodelay, 251 Opt_notcp_nodelay,
250}; 252};
@@ -267,6 +269,8 @@ static match_table_t opt_tokens = {
267 {Opt_nocrc, "nocrc"}, 269 {Opt_nocrc, "nocrc"},
268 {Opt_cephx_require_signatures, "cephx_require_signatures"}, 270 {Opt_cephx_require_signatures, "cephx_require_signatures"},
269 {Opt_nocephx_require_signatures, "nocephx_require_signatures"}, 271 {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
272 {Opt_cephx_sign_messages, "cephx_sign_messages"},
273 {Opt_nocephx_sign_messages, "nocephx_sign_messages"},
270 {Opt_tcp_nodelay, "tcp_nodelay"}, 274 {Opt_tcp_nodelay, "tcp_nodelay"},
271 {Opt_notcp_nodelay, "notcp_nodelay"}, 275 {Opt_notcp_nodelay, "notcp_nodelay"},
272 {-1, NULL} 276 {-1, NULL}
@@ -491,6 +495,12 @@ ceph_parse_options(char *options, const char *dev_name,
491 case Opt_nocephx_require_signatures: 495 case Opt_nocephx_require_signatures:
492 opt->flags |= CEPH_OPT_NOMSGAUTH; 496 opt->flags |= CEPH_OPT_NOMSGAUTH;
493 break; 497 break;
498 case Opt_cephx_sign_messages:
499 opt->flags &= ~CEPH_OPT_NOMSGSIGN;
500 break;
501 case Opt_nocephx_sign_messages:
502 opt->flags |= CEPH_OPT_NOMSGSIGN;
503 break;
494 504
495 case Opt_tcp_nodelay: 505 case Opt_tcp_nodelay:
496 opt->flags |= CEPH_OPT_TCP_NODELAY; 506 opt->flags |= CEPH_OPT_TCP_NODELAY;
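
The new cephx_sign_messages/nocephx_sign_messages pair follows the existing convention for negatable mount options: the "no" token sets a NOMSGSIGN flag bit, the positive token clears it, so a later option overrides an earlier one and the default (signing enabled) is simply a zero bit. A small sketch of that parse-into-flags shape, with the flag constant invented for illustration:

#include <stdio.h>
#include <string.h>

#define OPT_NOMSGSIGN 0x1 /* illustrative flag bit */

static unsigned parse_opt(const char *opt, unsigned flags)
{
	if (!strcmp(opt, "nocephx_sign_messages"))
		flags |= OPT_NOMSGSIGN;  /* disable signing */
	else if (!strcmp(opt, "cephx_sign_messages"))
		flags &= ~OPT_NOMSGSIGN; /* re-enable signing */
	return flags;
}

int main(void)
{
	unsigned flags = 0;

	flags = parse_opt("nocephx_sign_messages", flags);
	flags = parse_opt("cephx_sign_messages", flags); /* last one wins */
	printf("signing %s\n", (flags & OPT_NOMSGSIGN) ? "off" : "on");
	return 0;
}
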
@@ -534,6 +544,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
534 seq_puts(m, "nocrc,"); 544 seq_puts(m, "nocrc,");
535 if (opt->flags & CEPH_OPT_NOMSGAUTH) 545 if (opt->flags & CEPH_OPT_NOMSGAUTH)
536 seq_puts(m, "nocephx_require_signatures,"); 546 seq_puts(m, "nocephx_require_signatures,");
547 if (opt->flags & CEPH_OPT_NOMSGSIGN)
548 seq_puts(m, "nocephx_sign_messages,");
537 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) 549 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
538 seq_puts(m, "notcp_nodelay,"); 550 seq_puts(m, "notcp_nodelay,");
539 551
@@ -596,11 +608,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
596 if (ceph_test_opt(client, MYIP)) 608 if (ceph_test_opt(client, MYIP))
597 myaddr = &client->options->my_addr; 609 myaddr = &client->options->my_addr;
598 610
599 ceph_messenger_init(&client->msgr, myaddr, 611 ceph_messenger_init(&client->msgr, myaddr);
600 client->supported_features,
601 client->required_features,
602 ceph_test_opt(client, NOCRC),
603 ceph_test_opt(client, TCP_NODELAY));
604 612
605 /* subsystems */ 613 /* subsystems */
606 err = ceph_monc_init(&client->monc, client); 614 err = ceph_monc_init(&client->monc, client);
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index d1498224c49d..2e9cab09f37b 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -16,8 +16,10 @@ struct ceph_crypto_key {
16 16
17static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) 17static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
18{ 18{
19 if (key) 19 if (key) {
20 kfree(key->key); 20 kfree(key->key);
21 key->key = NULL;
22 }
21} 23}
22 24
23int ceph_crypto_key_clone(struct ceph_crypto_key *dst, 25int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b9b0e3b5da49..9981039ef4ff 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -509,7 +509,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
509 return ret; 509 return ret;
510 } 510 }
511 511
512 if (con->msgr->tcp_nodelay) { 512 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
513 int optval = 1; 513 int optval = 1;
514 514
515 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, 515 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
@@ -637,9 +637,6 @@ static int con_close_socket(struct ceph_connection *con)
637static void ceph_msg_remove(struct ceph_msg *msg) 637static void ceph_msg_remove(struct ceph_msg *msg)
638{ 638{
639 list_del_init(&msg->list_head); 639 list_del_init(&msg->list_head);
640 BUG_ON(msg->con == NULL);
641 msg->con->ops->put(msg->con);
642 msg->con = NULL;
643 640
644 ceph_msg_put(msg); 641 ceph_msg_put(msg);
645} 642}
@@ -662,15 +659,14 @@ static void reset_connection(struct ceph_connection *con)
662 659
663 if (con->in_msg) { 660 if (con->in_msg) {
664 BUG_ON(con->in_msg->con != con); 661 BUG_ON(con->in_msg->con != con);
665 con->in_msg->con = NULL;
666 ceph_msg_put(con->in_msg); 662 ceph_msg_put(con->in_msg);
667 con->in_msg = NULL; 663 con->in_msg = NULL;
668 con->ops->put(con);
669 } 664 }
670 665
671 con->connect_seq = 0; 666 con->connect_seq = 0;
672 con->out_seq = 0; 667 con->out_seq = 0;
673 if (con->out_msg) { 668 if (con->out_msg) {
669 BUG_ON(con->out_msg->con != con);
674 ceph_msg_put(con->out_msg); 670 ceph_msg_put(con->out_msg);
675 con->out_msg = NULL; 671 con->out_msg = NULL;
676 } 672 }
@@ -1205,7 +1201,7 @@ static void prepare_write_message_footer(struct ceph_connection *con)
1205 con->out_kvec[v].iov_base = &m->footer; 1201 con->out_kvec[v].iov_base = &m->footer;
1206 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) { 1202 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
1207 if (con->ops->sign_message) 1203 if (con->ops->sign_message)
1208 con->ops->sign_message(con, m); 1204 con->ops->sign_message(m);
1209 else 1205 else
1210 m->footer.sig = 0; 1206 m->footer.sig = 0;
1211 con->out_kvec[v].iov_len = sizeof(m->footer); 1207 con->out_kvec[v].iov_len = sizeof(m->footer);
@@ -1432,7 +1428,8 @@ static int prepare_write_connect(struct ceph_connection *con)
1432 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, 1428 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1433 con->connect_seq, global_seq, proto); 1429 con->connect_seq, global_seq, proto);
1434 1430
1435 con->out_connect.features = cpu_to_le64(con->msgr->supported_features); 1431 con->out_connect.features =
1432 cpu_to_le64(from_msgr(con->msgr)->supported_features);
1436 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 1433 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1437 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); 1434 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1438 con->out_connect.global_seq = cpu_to_le32(global_seq); 1435 con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -1527,7 +1524,7 @@ static int write_partial_message_data(struct ceph_connection *con)
1527{ 1524{
1528 struct ceph_msg *msg = con->out_msg; 1525 struct ceph_msg *msg = con->out_msg;
1529 struct ceph_msg_data_cursor *cursor = &msg->cursor; 1526 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1530 bool do_datacrc = !con->msgr->nocrc; 1527 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
1531 u32 crc; 1528 u32 crc;
1532 1529
1533 dout("%s %p msg %p\n", __func__, con, msg); 1530 dout("%s %p msg %p\n", __func__, con, msg);
@@ -1552,8 +1549,8 @@ static int write_partial_message_data(struct ceph_connection *con)
1552 bool need_crc; 1549 bool need_crc;
1553 int ret; 1550 int ret;
1554 1551
1555 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 1552 page = ceph_msg_data_next(cursor, &page_offset, &length,
1556 &last_piece); 1553 &last_piece);
1557 ret = ceph_tcp_sendpage(con->sock, page, page_offset, 1554 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1558 length, !last_piece); 1555 length, !last_piece);
1559 if (ret <= 0) { 1556 if (ret <= 0) {
@@ -1564,7 +1561,7 @@ static int write_partial_message_data(struct ceph_connection *con)
1564 } 1561 }
1565 if (do_datacrc && cursor->need_crc) 1562 if (do_datacrc && cursor->need_crc)
1566 crc = ceph_crc32c_page(crc, page, page_offset, length); 1563 crc = ceph_crc32c_page(crc, page, page_offset, length);
1567 need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret); 1564 need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
1568 } 1565 }
1569 1566
1570 dout("%s %p msg %p done\n", __func__, con, msg); 1567 dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2005,8 +2002,8 @@ static int process_banner(struct ceph_connection *con)
2005 2002
2006static int process_connect(struct ceph_connection *con) 2003static int process_connect(struct ceph_connection *con)
2007{ 2004{
2008 u64 sup_feat = con->msgr->supported_features; 2005 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2009 u64 req_feat = con->msgr->required_features; 2006 u64 req_feat = from_msgr(con->msgr)->required_features;
2010 u64 server_feat = ceph_sanitize_features( 2007 u64 server_feat = ceph_sanitize_features(
2011 le64_to_cpu(con->in_reply.features)); 2008 le64_to_cpu(con->in_reply.features));
2012 int ret; 2009 int ret;
@@ -2232,7 +2229,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
2232{ 2229{
2233 struct ceph_msg *msg = con->in_msg; 2230 struct ceph_msg *msg = con->in_msg;
2234 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2231 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2235 const bool do_datacrc = !con->msgr->nocrc; 2232 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2236 struct page *page; 2233 struct page *page;
2237 size_t page_offset; 2234 size_t page_offset;
2238 size_t length; 2235 size_t length;
@@ -2246,8 +2243,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
2246 if (do_datacrc) 2243 if (do_datacrc)
2247 crc = con->in_data_crc; 2244 crc = con->in_data_crc;
2248 while (cursor->resid) { 2245 while (cursor->resid) {
2249 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 2246 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
2250 NULL);
2251 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2247 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2252 if (ret <= 0) { 2248 if (ret <= 0) {
2253 if (do_datacrc) 2249 if (do_datacrc)
@@ -2258,7 +2254,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
2258 2254
2259 if (do_datacrc) 2255 if (do_datacrc)
2260 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2256 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2261 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); 2257 (void) ceph_msg_data_advance(cursor, (size_t)ret);
2262 } 2258 }
2263 if (do_datacrc) 2259 if (do_datacrc)
2264 con->in_data_crc = crc; 2260 con->in_data_crc = crc;
@@ -2278,7 +2274,7 @@ static int read_partial_message(struct ceph_connection *con)
2278 int end; 2274 int end;
2279 int ret; 2275 int ret;
2280 unsigned int front_len, middle_len, data_len; 2276 unsigned int front_len, middle_len, data_len;
2281 bool do_datacrc = !con->msgr->nocrc; 2277 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2282 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2278 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2283 u64 seq; 2279 u64 seq;
2284 u32 crc; 2280 u32 crc;
@@ -2423,7 +2419,7 @@ static int read_partial_message(struct ceph_connection *con)
2423 } 2419 }
2424 2420
2425 if (need_sign && con->ops->check_message_signature && 2421 if (need_sign && con->ops->check_message_signature &&
2426 con->ops->check_message_signature(con, m)) { 2422 con->ops->check_message_signature(m)) {
2427 pr_err("read_partial_message %p signature check failed\n", m); 2423 pr_err("read_partial_message %p signature check failed\n", m);
2428 return -EBADMSG; 2424 return -EBADMSG;
2429 } 2425 }
@@ -2438,13 +2434,10 @@ static int read_partial_message(struct ceph_connection *con)
2438 */ 2434 */
2439static void process_message(struct ceph_connection *con) 2435static void process_message(struct ceph_connection *con)
2440{ 2436{
2441 struct ceph_msg *msg; 2437 struct ceph_msg *msg = con->in_msg;
2442 2438
2443 BUG_ON(con->in_msg->con != con); 2439 BUG_ON(con->in_msg->con != con);
2444 con->in_msg->con = NULL;
2445 msg = con->in_msg;
2446 con->in_msg = NULL; 2440 con->in_msg = NULL;
2447 con->ops->put(con);
2448 2441
2449 /* if first message, set peer_name */ 2442 /* if first message, set peer_name */
2450 if (con->peer_name.type == 0) 2443 if (con->peer_name.type == 0)
@@ -2677,7 +2670,7 @@ more:
2677 if (ret <= 0) { 2670 if (ret <= 0) {
2678 switch (ret) { 2671 switch (ret) {
2679 case -EBADMSG: 2672 case -EBADMSG:
2680 con->error_msg = "bad crc"; 2673 con->error_msg = "bad crc/signature";
2681 /* fall through */ 2674 /* fall through */
2682 case -EBADE: 2675 case -EBADE:
2683 ret = -EIO; 2676 ret = -EIO;
@@ -2918,10 +2911,8 @@ static void con_fault(struct ceph_connection *con)
2918 2911
2919 if (con->in_msg) { 2912 if (con->in_msg) {
2920 BUG_ON(con->in_msg->con != con); 2913 BUG_ON(con->in_msg->con != con);
2921 con->in_msg->con = NULL;
2922 ceph_msg_put(con->in_msg); 2914 ceph_msg_put(con->in_msg);
2923 con->in_msg = NULL; 2915 con->in_msg = NULL;
2924 con->ops->put(con);
2925 } 2916 }
2926 2917
2927 /* Requeue anything that hasn't been acked */ 2918 /* Requeue anything that hasn't been acked */
@@ -2952,15 +2943,8 @@ static void con_fault(struct ceph_connection *con)
2952 * initialize a new messenger instance 2943 * initialize a new messenger instance
2953 */ 2944 */
2954void ceph_messenger_init(struct ceph_messenger *msgr, 2945void ceph_messenger_init(struct ceph_messenger *msgr,
2955 struct ceph_entity_addr *myaddr, 2946 struct ceph_entity_addr *myaddr)
2956 u64 supported_features,
2957 u64 required_features,
2958 bool nocrc,
2959 bool tcp_nodelay)
2960{ 2947{
2961 msgr->supported_features = supported_features;
2962 msgr->required_features = required_features;
2963
2964 spin_lock_init(&msgr->global_seq_lock); 2948 spin_lock_init(&msgr->global_seq_lock);
2965 2949
2966 if (myaddr) 2950 if (myaddr)
@@ -2970,8 +2954,6 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
2970 msgr->inst.addr.type = 0; 2954 msgr->inst.addr.type = 0;
2971 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2955 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2972 encode_my_addr(msgr); 2956 encode_my_addr(msgr);
2973 msgr->nocrc = nocrc;
2974 msgr->tcp_nodelay = tcp_nodelay;
2975 2957
2976 atomic_set(&msgr->stopping, 0); 2958 atomic_set(&msgr->stopping, 0);
2977 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); 2959 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
@@ -2986,6 +2968,15 @@ void ceph_messenger_fini(struct ceph_messenger *msgr)
2986} 2968}
2987EXPORT_SYMBOL(ceph_messenger_fini); 2969EXPORT_SYMBOL(ceph_messenger_fini);
2988 2970
2971static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
2972{
2973 if (msg->con)
2974 msg->con->ops->put(msg->con);
2975
2976 msg->con = con ? con->ops->get(con) : NULL;
2977 BUG_ON(msg->con != con);
2978}
2979
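
msg_con_set() centralizes the msg->con ownership rule that was previously duplicated at every call site: drop the reference on the old connection, take one on the new, and let NULL mean "detach". With ceph_msg_release() calling msg_con_set(m, NULL), the connection reference can no longer outlive the message. A refcount sketch of the same swap, with toy get/put helpers:

#include <assert.h>

struct con {
	int refs;
};

struct msg {
	struct con *con;
};

static struct con *con_get(struct con *c)
{
	c->refs++;
	return c;
}

static void con_put(struct con *c)
{
	c->refs--;
	assert(c->refs >= 0);
}

/* Replace msg->con, fixing up references; con may be NULL. */
static void msg_con_set(struct msg *m, struct con *c)
{
	if (m->con)
		con_put(m->con);
	m->con = c ? con_get(c) : NULL;
}

int main(void)
{
	struct con a = { 1 }, b = { 1 };
	struct msg m = { 0 };

	msg_con_set(&m, &a);   /* a.refs == 2 */
	msg_con_set(&m, &b);   /* a.refs back to 1, b.refs == 2 */
	msg_con_set(&m, 0);    /* detach, as on release */
	return (a.refs == 1 && b.refs == 1) ? 0 : 1;
}
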
2989static void clear_standby(struct ceph_connection *con) 2980static void clear_standby(struct ceph_connection *con)
2990{ 2981{
2991 /* come back from STANDBY? */ 2982 /* come back from STANDBY? */
@@ -3017,9 +3008,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3017 return; 3008 return;
3018 } 3009 }
3019 3010
3020 BUG_ON(msg->con != NULL); 3011 msg_con_set(msg, con);
3021 msg->con = con->ops->get(con);
3022 BUG_ON(msg->con == NULL);
3023 3012
3024 BUG_ON(!list_empty(&msg->list_head)); 3013 BUG_ON(!list_empty(&msg->list_head));
3025 list_add_tail(&msg->list_head, &con->out_queue); 3014 list_add_tail(&msg->list_head, &con->out_queue);
@@ -3047,16 +3036,15 @@ void ceph_msg_revoke(struct ceph_msg *msg)
3047{ 3036{
3048 struct ceph_connection *con = msg->con; 3037 struct ceph_connection *con = msg->con;
3049 3038
3050 if (!con) 3039 if (!con) {
3040 dout("%s msg %p null con\n", __func__, msg);
3051 return; /* Message not in our possession */ 3041 return; /* Message not in our possession */
3042 }
3052 3043
3053 mutex_lock(&con->mutex); 3044 mutex_lock(&con->mutex);
3054 if (!list_empty(&msg->list_head)) { 3045 if (!list_empty(&msg->list_head)) {
3055 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3046 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3056 list_del_init(&msg->list_head); 3047 list_del_init(&msg->list_head);
3057 BUG_ON(msg->con == NULL);
3058 msg->con->ops->put(msg->con);
3059 msg->con = NULL;
3060 msg->hdr.seq = 0; 3048 msg->hdr.seq = 0;
3061 3049
3062 ceph_msg_put(msg); 3050 ceph_msg_put(msg);
@@ -3080,16 +3068,13 @@ void ceph_msg_revoke(struct ceph_msg *msg)
3080 */ 3068 */
3081void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3069void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3082{ 3070{
3083 struct ceph_connection *con; 3071 struct ceph_connection *con = msg->con;
3084 3072
3085 BUG_ON(msg == NULL); 3073 if (!con) {
3086 if (!msg->con) {
3087 dout("%s msg %p null con\n", __func__, msg); 3074 dout("%s msg %p null con\n", __func__, msg);
3088
3089 return; /* Message not in our possession */ 3075 return; /* Message not in our possession */
3090 } 3076 }
3091 3077
3092 con = msg->con;
3093 mutex_lock(&con->mutex); 3078 mutex_lock(&con->mutex);
3094 if (con->in_msg == msg) { 3079 if (con->in_msg == msg) {
3095 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3080 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
@@ -3335,9 +3320,8 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3335 } 3320 }
3336 if (msg) { 3321 if (msg) {
3337 BUG_ON(*skip); 3322 BUG_ON(*skip);
3323 msg_con_set(msg, con);
3338 con->in_msg = msg; 3324 con->in_msg = msg;
3339 con->in_msg->con = con->ops->get(con);
3340 BUG_ON(con->in_msg->con == NULL);
3341 } else { 3325 } else {
3342 /* 3326 /*
3343 * Null message pointer means either we should skip 3327 * Null message pointer means either we should skip
@@ -3384,6 +3368,8 @@ static void ceph_msg_release(struct kref *kref)
3384 dout("%s %p\n", __func__, m); 3368 dout("%s %p\n", __func__, m);
3385 WARN_ON(!list_empty(&m->list_head)); 3369 WARN_ON(!list_empty(&m->list_head));
3386 3370
3371 msg_con_set(m, NULL);
3372
3387 /* drop middle, data, if any */ 3373 /* drop middle, data, if any */
3388 if (m->middle) { 3374 if (m->middle) {
3389 ceph_buffer_put(m->middle); 3375 ceph_buffer_put(m->middle);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f79ccac6699f..f8f235930d88 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -120,11 +120,13 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
120} 120}
121#endif /* CONFIG_BLOCK */ 121#endif /* CONFIG_BLOCK */
122 122
123#define osd_req_op_data(oreq, whch, typ, fld) \ 123#define osd_req_op_data(oreq, whch, typ, fld) \
124 ({ \ 124({ \
125 BUG_ON(whch >= (oreq)->r_num_ops); \ 125 struct ceph_osd_request *__oreq = (oreq); \
126 &(oreq)->r_ops[whch].typ.fld; \ 126 unsigned int __whch = (whch); \
127 }) 127 BUG_ON(__whch >= __oreq->r_num_ops); \
128 &__oreq->r_ops[__whch].typ.fld; \
129})
128 130
129static struct ceph_osd_data * 131static struct ceph_osd_data *
130osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) 132osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
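
The reworked osd_req_op_data() copies each macro argument into a local exactly once, inside a GNU statement expression, so an argument with side effects (say, which++) is no longer evaluated twice: once by the BUG_ON and once by the array index. A compilable illustration of the hazard and the fix (gcc/clang only, since statement expressions are a GNU extension):

#include <assert.h>
#include <stdio.h>

/* Unsafe: 'i' appears twice, so its side effects would run twice. */
#define PICK_UNSAFE(arr, i) (assert((i) < 4), (arr)[(i)])

/* Safe: evaluate the argument exactly once into a local. */
#define PICK_SAFE(arr, i)			\
({						\
	int __i = (i);				\
	assert(__i < 4);			\
	(arr)[__i];				\
})

int main(void)
{
	int a[4] = { 10, 20, 30, 40 };
	int i = 0;

	int v = PICK_SAFE(a, i++);   /* i advances exactly once */
	printf("v=%d i=%d\n", v, i); /* v=10 i=1 */
	return 0;
}
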
@@ -1750,8 +1752,7 @@ static void complete_request(struct ceph_osd_request *req)
1750 * handle osd op reply. either call the callback if it is specified, 1752 * handle osd op reply. either call the callback if it is specified,
1751 * or do the completion to wake up the waiting thread. 1753 * or do the completion to wake up the waiting thread.
1752 */ 1754 */
1753static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, 1755static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1754 struct ceph_connection *con)
1755{ 1756{
1756 void *p, *end; 1757 void *p, *end;
1757 struct ceph_osd_request *req; 1758 struct ceph_osd_request *req;
@@ -2807,7 +2808,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2807 ceph_osdc_handle_map(osdc, msg); 2808 ceph_osdc_handle_map(osdc, msg);
2808 break; 2809 break;
2809 case CEPH_MSG_OSD_OPREPLY: 2810 case CEPH_MSG_OSD_OPREPLY:
2810 handle_reply(osdc, msg, con); 2811 handle_reply(osdc, msg);
2811 break; 2812 break;
2812 case CEPH_MSG_WATCH_NOTIFY: 2813 case CEPH_MSG_WATCH_NOTIFY:
2813 handle_watch_notify(osdc, msg); 2814 handle_watch_notify(osdc, msg);
@@ -2849,9 +2850,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2849 goto out; 2850 goto out;
2850 } 2851 }
2851 2852
2852 if (req->r_reply->con)
2853 dout("%s revoking msg %p from old con %p\n", __func__,
2854 req->r_reply, req->r_reply->con);
2855 ceph_msg_revoke_incoming(req->r_reply); 2853 ceph_msg_revoke_incoming(req->r_reply);
2856 2854
2857 if (front_len > req->r_reply->front_alloc_len) { 2855 if (front_len > req->r_reply->front_alloc_len) {
@@ -2978,17 +2976,19 @@ static int invalidate_authorizer(struct ceph_connection *con)
2978 return ceph_monc_validate_auth(&osdc->client->monc); 2976 return ceph_monc_validate_auth(&osdc->client->monc);
2979} 2977}
2980 2978
2981static int sign_message(struct ceph_connection *con, struct ceph_msg *msg) 2979static int osd_sign_message(struct ceph_msg *msg)
2982{ 2980{
2983 struct ceph_osd *o = con->private; 2981 struct ceph_osd *o = msg->con->private;
2984 struct ceph_auth_handshake *auth = &o->o_auth; 2982 struct ceph_auth_handshake *auth = &o->o_auth;
2983
2985 return ceph_auth_sign_message(auth, msg); 2984 return ceph_auth_sign_message(auth, msg);
2986} 2985}
2987 2986
2988static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg) 2987static int osd_check_message_signature(struct ceph_msg *msg)
2989{ 2988{
2990 struct ceph_osd *o = con->private; 2989 struct ceph_osd *o = msg->con->private;
2991 struct ceph_auth_handshake *auth = &o->o_auth; 2990 struct ceph_auth_handshake *auth = &o->o_auth;
2991
2992 return ceph_auth_check_message_signature(auth, msg); 2992 return ceph_auth_check_message_signature(auth, msg);
2993} 2993}
2994 2994
@@ -3000,7 +3000,7 @@ static const struct ceph_connection_operations osd_con_ops = {
3000 .verify_authorizer_reply = verify_authorizer_reply, 3000 .verify_authorizer_reply = verify_authorizer_reply,
3001 .invalidate_authorizer = invalidate_authorizer, 3001 .invalidate_authorizer = invalidate_authorizer,
3002 .alloc_msg = alloc_msg, 3002 .alloc_msg = alloc_msg,
3003 .sign_message = sign_message, 3003 .sign_message = osd_sign_message,
3004 .check_message_signature = check_message_signature, 3004 .check_message_signature = osd_check_message_signature,
3005 .fault = osd_reset, 3005 .fault = osd_reset,
3006}; 3006};
diff --git a/net/core/dev.c b/net/core/dev.c
index ab9b8d0d115e..5df6cbce727c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -96,6 +96,7 @@
96#include <linux/skbuff.h> 96#include <linux/skbuff.h>
97#include <net/net_namespace.h> 97#include <net/net_namespace.h>
98#include <net/sock.h> 98#include <net/sock.h>
99#include <net/busy_poll.h>
99#include <linux/rtnetlink.h> 100#include <linux/rtnetlink.h>
100#include <linux/stat.h> 101#include <linux/stat.h>
101#include <net/dst.h> 102#include <net/dst.h>
@@ -182,8 +183,8 @@ EXPORT_SYMBOL(dev_base_lock);
182/* protects napi_hash addition/deletion and napi_gen_id */ 183/* protects napi_hash addition/deletion and napi_gen_id */
183static DEFINE_SPINLOCK(napi_hash_lock); 184static DEFINE_SPINLOCK(napi_hash_lock);
184 185
185static unsigned int napi_gen_id; 186static unsigned int napi_gen_id = NR_CPUS;
186static DEFINE_HASHTABLE(napi_hash, 8); 187static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
187 188
188static seqcount_t devnet_rename_seq; 189static seqcount_t devnet_rename_seq;
189 190
@@ -2403,17 +2404,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
2403{ 2404{
2404 static const netdev_features_t null_features = 0; 2405 static const netdev_features_t null_features = 0;
2405 struct net_device *dev = skb->dev; 2406 struct net_device *dev = skb->dev;
2406 const char *driver = ""; 2407 const char *name = "";
2407 2408
2408 if (!net_ratelimit()) 2409 if (!net_ratelimit())
2409 return; 2410 return;
2410 2411
2411 if (dev && dev->dev.parent) 2412 if (dev) {
2412 driver = dev_driver_string(dev->dev.parent); 2413 if (dev->dev.parent)
2413 2414 name = dev_driver_string(dev->dev.parent);
2415 else
2416 name = netdev_name(dev);
2417 }
2414 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2418 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2415 "gso_type=%d ip_summed=%d\n", 2419 "gso_type=%d ip_summed=%d\n",
2416 driver, dev ? &dev->features : &null_features, 2420 name, dev ? &dev->features : &null_features,
2417 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2421 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2418 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2422 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2419 skb_shinfo(skb)->gso_type, skb->ip_summed); 2423 skb_shinfo(skb)->gso_type, skb->ip_summed);
@@ -3018,7 +3022,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3018 int queue_index = 0; 3022 int queue_index = 0;
3019 3023
3020#ifdef CONFIG_XPS 3024#ifdef CONFIG_XPS
3021 if (skb->sender_cpu == 0) 3025 u32 sender_cpu = skb->sender_cpu - 1;
3026
3027 if (sender_cpu >= (u32)NR_CPUS)
3022 skb->sender_cpu = raw_smp_processor_id() + 1; 3028 skb->sender_cpu = raw_smp_processor_id() + 1;
3023#endif 3029#endif
3024 3030
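
The rewritten check folds two tests into one: skb->sender_cpu stores cpu + 1, so an unset field (0) minus 1 wraps to UINT_MAX, and a single unsigned comparison against NR_CPUS catches both "never set" and "out of range". A compilable illustration of the trick (NR_CPUS value arbitrary):

#include <stdio.h>

#define NR_CPUS 8

/* valid stored values are 1..NR_CPUS; 0 means "not set yet" */
static int needs_refresh(unsigned int sender_cpu)
{
        /* 0 - 1 wraps to UINT_MAX, so one unsigned compare covers
         * both the unset and the out-of-range case
         */
        return sender_cpu - 1 >= (unsigned int)NR_CPUS;
}

int main(void)
{
        printf("%d %d %d\n",
               needs_refresh(0),            /* 1: unset */
               needs_refresh(3),            /* 0: valid */
               needs_refresh(NR_CPUS + 1)); /* 1: out of range */
        return 0;
}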
@@ -4350,6 +4356,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4350 4356
4351gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 4357gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4352{ 4358{
4359 skb_mark_napi_id(skb, napi);
4353 trace_napi_gro_receive_entry(skb); 4360 trace_napi_gro_receive_entry(skb);
4354 4361
4355 skb_gro_reset_offset(skb); 4362 skb_gro_reset_offset(skb);
@@ -4383,7 +4390,10 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
4383 4390
4384 if (!skb) { 4391 if (!skb) {
4385 skb = napi_alloc_skb(napi, GRO_MAX_HEAD); 4392 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4386 napi->skb = skb; 4393 if (skb) {
4394 napi->skb = skb;
4395 skb_mark_napi_id(skb, napi);
4396 }
4387 } 4397 }
4388 return skb; 4398 return skb;
4389} 4399}
@@ -4658,7 +4668,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
4658EXPORT_SYMBOL(napi_complete_done); 4668EXPORT_SYMBOL(napi_complete_done);
4659 4669
4660/* must be called under rcu_read_lock(), as we don't take a reference */ 4670/* must be called under rcu_read_lock(), as we don't take a reference */
4661struct napi_struct *napi_by_id(unsigned int napi_id) 4671static struct napi_struct *napi_by_id(unsigned int napi_id)
4662{ 4672{
4663 unsigned int hash = napi_id % HASH_SIZE(napi_hash); 4673 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4664 struct napi_struct *napi; 4674 struct napi_struct *napi;
@@ -4669,43 +4679,101 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
4669 4679
4670 return NULL; 4680 return NULL;
4671} 4681}
4672EXPORT_SYMBOL_GPL(napi_by_id);
4673 4682
4674void napi_hash_add(struct napi_struct *napi) 4683#if defined(CONFIG_NET_RX_BUSY_POLL)
4684#define BUSY_POLL_BUDGET 8
4685bool sk_busy_loop(struct sock *sk, int nonblock)
4675{ 4686{
4676 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) { 4687 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
4688 int (*busy_poll)(struct napi_struct *dev);
4689 struct napi_struct *napi;
4690 int rc = false;
4677 4691
4678 spin_lock(&napi_hash_lock); 4692 rcu_read_lock();
4679 4693
4680 /* 0 is not a valid id, we also skip an id that is taken 4694 napi = napi_by_id(sk->sk_napi_id);
4681 * we expect both events to be extremely rare 4695 if (!napi)
4682 */ 4696 goto out;
4683 napi->napi_id = 0; 4697
4684 while (!napi->napi_id) { 4698 /* Note: ndo_busy_poll method is optional in linux-4.5 */
4685 napi->napi_id = ++napi_gen_id; 4699 busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
4686 if (napi_by_id(napi->napi_id)) 4700
4687 napi->napi_id = 0; 4701 do {
4702 rc = 0;
4703 local_bh_disable();
4704 if (busy_poll) {
4705 rc = busy_poll(napi);
4706 } else if (napi_schedule_prep(napi)) {
4707 void *have = netpoll_poll_lock(napi);
4708
4709 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4710 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4711 trace_napi_poll(napi);
4712 if (rc == BUSY_POLL_BUDGET) {
4713 napi_complete_done(napi, rc);
4714 napi_schedule(napi);
4715 }
4716 }
4717 netpoll_poll_unlock(have);
4688 } 4718 }
4719 if (rc > 0)
4720 NET_ADD_STATS_BH(sock_net(sk),
4721 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4722 local_bh_enable();
4689 4723
4690 hlist_add_head_rcu(&napi->napi_hash_node, 4724 if (rc == LL_FLUSH_FAILED)
4691 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); 4725 break; /* permanent failure */
4692 4726
4693 spin_unlock(&napi_hash_lock); 4727 cpu_relax();
4694 } 4728 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4729 !need_resched() && !busy_loop_timeout(end_time));
4730
4731 rc = !skb_queue_empty(&sk->sk_receive_queue);
4732out:
4733 rcu_read_unlock();
4734 return rc;
4735}
4736EXPORT_SYMBOL(sk_busy_loop);
4737
4738#endif /* CONFIG_NET_RX_BUSY_POLL */
4739
4740void napi_hash_add(struct napi_struct *napi)
4741{
4742 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4743 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
4744 return;
4745
4746 spin_lock(&napi_hash_lock);
4747
4748 /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4749 do {
4750 if (unlikely(++napi_gen_id < NR_CPUS + 1))
4751 napi_gen_id = NR_CPUS + 1;
4752 } while (napi_by_id(napi_gen_id));
4753 napi->napi_id = napi_gen_id;
4754
4755 hlist_add_head_rcu(&napi->napi_hash_node,
4756 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4757
4758 spin_unlock(&napi_hash_lock);
4695} 4759}
4696EXPORT_SYMBOL_GPL(napi_hash_add); 4760EXPORT_SYMBOL_GPL(napi_hash_add);
4697 4761
4698/* Warning: caller is responsible for making sure the RCU grace period 4762/* Warning: caller is responsible for making sure the RCU grace period
4699 * is respected before freeing memory containing @napi 4763 * is respected before freeing memory containing @napi
4700 */ 4764 */
4701void napi_hash_del(struct napi_struct *napi) 4765bool napi_hash_del(struct napi_struct *napi)
4702{ 4766{
4767 bool rcu_sync_needed = false;
4768
4703 spin_lock(&napi_hash_lock); 4769 spin_lock(&napi_hash_lock);
4704 4770
4705 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) 4771 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4772 rcu_sync_needed = true;
4706 hlist_del_rcu(&napi->napi_hash_node); 4773 hlist_del_rcu(&napi->napi_hash_node);
4707 4774 }
4708 spin_unlock(&napi_hash_lock); 4775 spin_unlock(&napi_hash_lock);
4776 return rcu_sync_needed;
4709} 4777}
4710EXPORT_SYMBOL_GPL(napi_hash_del); 4778EXPORT_SYMBOL_GPL(napi_hash_del);
4711 4779
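
The allocation loop above restarts at NR_CPUS + 1 on wraparound because napi_id values now share a number space with skb->sender_cpu (which holds cpu + 1, i.e. 1..NR_CPUS); that is also why napi_gen_id starts at NR_CPUS earlier in this diff. A standalone sketch of the loop, with id_taken() as a stand-in for napi_by_id():

#include <stdio.h>

#define NR_CPUS 8

static unsigned int napi_gen_id = NR_CPUS;

static int id_taken(unsigned int id)
{
        (void)id;
        return 0;       /* pretend no collision ever happens */
}

static unsigned int alloc_napi_id(void)
{
        /* ids 0..NR_CPUS are reserved for sender_cpu values, so on
         * first use or wraparound restart at NR_CPUS + 1
         */
        do {
                if (++napi_gen_id < NR_CPUS + 1)
                        napi_gen_id = NR_CPUS + 1;
        } while (id_taken(napi_gen_id));
        return napi_gen_id;
}

int main(void)
{
        printf("%u %u\n", alloc_napi_id(), alloc_napi_id()); /* 9 10 */
        return 0;
}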
@@ -4741,6 +4809,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4741 napi->poll_owner = -1; 4809 napi->poll_owner = -1;
4742#endif 4810#endif
4743 set_bit(NAPI_STATE_SCHED, &napi->state); 4811 set_bit(NAPI_STATE_SCHED, &napi->state);
4812 napi_hash_add(napi);
4744} 4813}
4745EXPORT_SYMBOL(netif_napi_add); 4814EXPORT_SYMBOL(netif_napi_add);
4746 4815
@@ -4760,8 +4829,12 @@ void napi_disable(struct napi_struct *n)
4760} 4829}
4761EXPORT_SYMBOL(napi_disable); 4830EXPORT_SYMBOL(napi_disable);
4762 4831
4832/* Must be called in process context */
4763void netif_napi_del(struct napi_struct *napi) 4833void netif_napi_del(struct napi_struct *napi)
4764{ 4834{
4835 might_sleep();
4836 if (napi_hash_del(napi))
4837 synchronize_net();
4765 list_del_init(&napi->dev_list); 4838 list_del_init(&napi->dev_list);
4766 napi_free_frags(napi); 4839 napi_free_frags(napi);
4767 4840
@@ -6426,11 +6499,16 @@ int __netdev_update_features(struct net_device *dev)
6426 6499
6427 if (dev->netdev_ops->ndo_set_features) 6500 if (dev->netdev_ops->ndo_set_features)
6428 err = dev->netdev_ops->ndo_set_features(dev, features); 6501 err = dev->netdev_ops->ndo_set_features(dev, features);
6502 else
6503 err = 0;
6429 6504
6430 if (unlikely(err < 0)) { 6505 if (unlikely(err < 0)) {
6431 netdev_err(dev, 6506 netdev_err(dev,
6432 "set_features() failed (%d); wanted %pNF, left %pNF\n", 6507 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6433 err, &features, &dev->features); 6508 err, &features, &dev->features);
6509 /* return non-0 since some features might have changed and
6510 * it's better to fire a spurious notification than miss it
6511 */
6434 return -1; 6512 return -1;
6435 } 6513 }
6436 6514
@@ -7156,11 +7234,13 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
7156 * This function does the last stage of destroying an allocated device 7234 * This function does the last stage of destroying an allocated device
7157 * interface. The reference to the device object is released. 7235 * interface. The reference to the device object is released.
7158 * If this is the last reference then it will be freed. 7236 * If this is the last reference then it will be freed.
7237 * Must be called in process context.
7159 */ 7238 */
7160void free_netdev(struct net_device *dev) 7239void free_netdev(struct net_device *dev)
7161{ 7240{
7162 struct napi_struct *p, *n; 7241 struct napi_struct *p, *n;
7163 7242
7243 might_sleep();
7164 netif_free_tx_queues(dev); 7244 netif_free_tx_queues(dev);
7165#ifdef CONFIG_SYSFS 7245#ifdef CONFIG_SYSFS
7166 kvfree(dev->_rx); 7246 kvfree(dev->_rx);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1aa8437ed6c4..e6af42da28d9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh)
857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 /* keep skb alive even if arp_queue overflows */ 858 /* keep skb alive even if arp_queue overflows */
859 if (skb) 859 if (skb)
860 skb = skb_copy(skb, GFP_ATOMIC); 860 skb = skb_clone(skb, GFP_ATOMIC);
861 write_unlock(&neigh->lock); 861 write_unlock(&neigh->lock);
862 neigh->ops->solicit(neigh, skb); 862 neigh->ops->solicit(neigh, skb);
863 atomic_inc(&neigh->probes); 863 atomic_inc(&neigh->probes);
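
The neigh_probe() change swaps a deep copy for a clone: skb_clone() duplicates only the sk_buff header and shares the refcounted packet data, which is all the read-only solicit path needs, while skb_copy() also duplicates the payload. A rough userspace analogue of the two semantics (hypothetical buf type, not the skb API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; int *refcnt; };

/* share the payload and bump a refcount, analogue of skb_clone() */
static struct buf clone_buf(const struct buf *b)
{
        struct buf c = *b;

        ++*c.refcnt;
        return c;
}

/* duplicate the payload as well, analogue of skb_copy() */
static struct buf copy_buf(const struct buf *b)
{
        struct buf c;

        c.refcnt = malloc(sizeof(*c.refcnt));
        *c.refcnt = 1;
        c.data = strdup(b->data);
        return c;
}

int main(void)
{
        int rc = 1;
        struct buf b = { .data = strdup("probe"), .refcnt = &rc };
        struct buf c1 = clone_buf(&b), c2 = copy_buf(&b);

        printf("shared=%d refs=%d copied=%d\n",
               c1.data == b.data, *b.refcnt, c2.data != b.data);
        free(c2.data);
        free(c2.refcnt);
        free(b.data);
        return 0;
}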
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index adef015b2f41..92da5e4ceb4f 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -32,6 +32,10 @@
32#include <trace/events/sock.h> 32#include <trace/events/sock.h>
33#include <trace/events/udp.h> 33#include <trace/events/udp.h>
34#include <trace/events/fib.h> 34#include <trace/events/fib.h>
35#if IS_ENABLED(CONFIG_IPV6)
36#include <trace/events/fib6.h>
37EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
38#endif
35 39
36EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb); 40EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
37 41
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 504bd17b7456..34ba7a08876d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
1048static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1049 struct net_device *dev)
1050{
1051 const struct rtnl_link_stats64 *stats;
1052 struct rtnl_link_stats64 temp;
1053 struct nlattr *attr;
1054
1055 stats = dev_get_stats(dev, &temp);
1056
1057 attr = nla_reserve(skb, IFLA_STATS,
1058 sizeof(struct rtnl_link_stats));
1059 if (!attr)
1060 return -EMSGSIZE;
1061
1062 copy_rtnl_link_stats(nla_data(attr), stats);
1063
1064 attr = nla_reserve(skb, IFLA_STATS64,
1065 sizeof(struct rtnl_link_stats64));
1066 if (!attr)
1067 return -EMSGSIZE;
1068
1069 copy_rtnl_link_stats64(nla_data(attr), stats);
1070
1071 return 0;
1072}
1073
1074static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1075 struct net_device *dev,
1076 int vfs_num,
1077 struct nlattr *vfinfo)
1078{
1079 struct ifla_vf_rss_query_en vf_rss_query_en;
1080 struct ifla_vf_link_state vf_linkstate;
1081 struct ifla_vf_spoofchk vf_spoofchk;
1082 struct ifla_vf_tx_rate vf_tx_rate;
1083 struct ifla_vf_stats vf_stats;
1084 struct ifla_vf_trust vf_trust;
1085 struct ifla_vf_vlan vf_vlan;
1086 struct ifla_vf_rate vf_rate;
1087 struct nlattr *vf, *vfstats;
1088 struct ifla_vf_mac vf_mac;
1089 struct ifla_vf_info ivi;
1090
1091 /* Not all SR-IOV capable drivers support the
1092 * spoofcheck and "RSS query enable" query. Preset to
1093 * -1 so the user space tool can detect that the driver
1094 * didn't report anything.
1095 */
1096 ivi.spoofchk = -1;
1097 ivi.rss_query_en = -1;
1098 ivi.trusted = -1;
1099 memset(ivi.mac, 0, sizeof(ivi.mac));
1100 /* The default value for VF link state is "auto"
1101 * IFLA_VF_LINK_STATE_AUTO which equals zero
1102 */
1103 ivi.linkstate = 0;
1104 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1105 return 0;
1106
1107 vf_mac.vf =
1108 vf_vlan.vf =
1109 vf_rate.vf =
1110 vf_tx_rate.vf =
1111 vf_spoofchk.vf =
1112 vf_linkstate.vf =
1113 vf_rss_query_en.vf =
1114 vf_trust.vf = ivi.vf;
1115
1116 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1117 vf_vlan.vlan = ivi.vlan;
1118 vf_vlan.qos = ivi.qos;
1119 vf_tx_rate.rate = ivi.max_tx_rate;
1120 vf_rate.min_tx_rate = ivi.min_tx_rate;
1121 vf_rate.max_tx_rate = ivi.max_tx_rate;
1122 vf_spoofchk.setting = ivi.spoofchk;
1123 vf_linkstate.link_state = ivi.linkstate;
1124 vf_rss_query_en.setting = ivi.rss_query_en;
1125 vf_trust.setting = ivi.trusted;
1126 vf = nla_nest_start(skb, IFLA_VF_INFO);
1127 if (!vf) {
1128 nla_nest_cancel(skb, vfinfo);
1129 return -EMSGSIZE;
1130 }
1131 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1132 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1133 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1134 &vf_rate) ||
1135 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1136 &vf_tx_rate) ||
1137 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1138 &vf_spoofchk) ||
1139 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1140 &vf_linkstate) ||
1141 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1142 sizeof(vf_rss_query_en),
1143 &vf_rss_query_en) ||
1144 nla_put(skb, IFLA_VF_TRUST,
1145 sizeof(vf_trust), &vf_trust))
1146 return -EMSGSIZE;
1147 memset(&vf_stats, 0, sizeof(vf_stats));
1148 if (dev->netdev_ops->ndo_get_vf_stats)
1149 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1150 &vf_stats);
1151 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1152 if (!vfstats) {
1153 nla_nest_cancel(skb, vf);
1154 nla_nest_cancel(skb, vfinfo);
1155 return -EMSGSIZE;
1156 }
1157 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1158 vf_stats.rx_packets) ||
1159 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1160 vf_stats.tx_packets) ||
1161 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1162 vf_stats.rx_bytes) ||
1163 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1164 vf_stats.tx_bytes) ||
1165 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1166 vf_stats.broadcast) ||
1167 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1168 vf_stats.multicast))
1169 return -EMSGSIZE;
1170 nla_nest_end(skb, vfstats);
1171 nla_nest_end(skb, vf);
1172 return 0;
1173}
1174
1175static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1176{
1177 struct rtnl_link_ifmap map = {
1178 .mem_start = dev->mem_start,
1179 .mem_end = dev->mem_end,
1180 .base_addr = dev->base_addr,
1181 .irq = dev->irq,
1182 .dma = dev->dma,
1183 .port = dev->if_port,
1184 };
1185 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1186 return -EMSGSIZE;
1187
1188 return 0;
1189}
1190
1048static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 1191static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1049 int type, u32 pid, u32 seq, u32 change, 1192 int type, u32 pid, u32 seq, u32 change,
1050 unsigned int flags, u32 ext_filter_mask) 1193 unsigned int flags, u32 ext_filter_mask)
1051{ 1194{
1052 struct ifinfomsg *ifm; 1195 struct ifinfomsg *ifm;
1053 struct nlmsghdr *nlh; 1196 struct nlmsghdr *nlh;
1054 struct rtnl_link_stats64 temp; 1197 struct nlattr *af_spec;
1055 const struct rtnl_link_stats64 *stats;
1056 struct nlattr *attr, *af_spec;
1057 struct rtnl_af_ops *af_ops; 1198 struct rtnl_af_ops *af_ops;
1058 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 1199 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1059 1200
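
The new rtnl_fill_stats() keeps rtnetlink's reserve-then-fill idiom: nla_reserve() claims attribute space in the message up front, so a full skb fails cleanly with EMSGSIZE, and hands back a slot that is filled in place. A flat-buffer sketch of that shape, with a hypothetical reserve() rather than the real nla_reserve():

#include <stdio.h>
#include <string.h>

/* claim len bytes in a bounded buffer, returning a pointer for the
 * caller to fill later, or NULL when the message is full
 */
static void *reserve(char *buf, size_t *used, size_t cap, size_t len)
{
        void *slot;

        if (*used + len > cap)
                return NULL;            /* caller maps this to -EMSGSIZE */
        slot = buf + *used;
        *used += len;
        return slot;
}

int main(void)
{
        char msg[64];
        size_t used = 0;
        char *stats = reserve(msg, &used, sizeof(msg), 5);

        if (!stats)
                return 1;
        memcpy(stats, "stats", 5);      /* fill the reserved slot */
        printf("used=%zu\n", used);     /* used=5 */
        return 0;
}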
@@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1096 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1237 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1097 goto nla_put_failure; 1238 goto nla_put_failure;
1098 1239
1099 if (1) { 1240 if (rtnl_fill_link_ifmap(skb, dev))
1100 struct rtnl_link_ifmap map = { 1241 goto nla_put_failure;
1101 .mem_start = dev->mem_start,
1102 .mem_end = dev->mem_end,
1103 .base_addr = dev->base_addr,
1104 .irq = dev->irq,
1105 .dma = dev->dma,
1106 .port = dev->if_port,
1107 };
1108 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1109 goto nla_put_failure;
1110 }
1111 1242
1112 if (dev->addr_len) { 1243 if (dev->addr_len) {
1113 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1244 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
@@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1124 if (rtnl_phys_switch_id_fill(skb, dev)) 1255 if (rtnl_phys_switch_id_fill(skb, dev))
1125 goto nla_put_failure; 1256 goto nla_put_failure;
1126 1257
1127 attr = nla_reserve(skb, IFLA_STATS, 1258 if (rtnl_fill_stats(skb, dev))
1128 sizeof(struct rtnl_link_stats));
1129 if (attr == NULL)
1130 goto nla_put_failure;
1131
1132 stats = dev_get_stats(dev, &temp);
1133 copy_rtnl_link_stats(nla_data(attr), stats);
1134
1135 attr = nla_reserve(skb, IFLA_STATS64,
1136 sizeof(struct rtnl_link_stats64));
1137 if (attr == NULL)
1138 goto nla_put_failure; 1259 goto nla_put_failure;
1139 copy_rtnl_link_stats64(nla_data(attr), stats);
1140 1260
1141 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && 1261 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1142 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) 1262 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1143 goto nla_put_failure; 1263 goto nla_put_failure;
1144 1264
1145 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 1265 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1146 && (ext_filter_mask & RTEXT_FILTER_VF)) { 1266 ext_filter_mask & RTEXT_FILTER_VF) {
1147 int i; 1267 int i;
1148 1268 struct nlattr *vfinfo;
1149 struct nlattr *vfinfo, *vf, *vfstats;
1150 int num_vfs = dev_num_vf(dev->dev.parent); 1269 int num_vfs = dev_num_vf(dev->dev.parent);
1151 1270
1152 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 1271 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1153 if (!vfinfo) 1272 if (!vfinfo)
1154 goto nla_put_failure; 1273 goto nla_put_failure;
1155 for (i = 0; i < num_vfs; i++) { 1274 for (i = 0; i < num_vfs; i++) {
1156 struct ifla_vf_info ivi; 1275 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1157 struct ifla_vf_mac vf_mac;
1158 struct ifla_vf_vlan vf_vlan;
1159 struct ifla_vf_rate vf_rate;
1160 struct ifla_vf_tx_rate vf_tx_rate;
1161 struct ifla_vf_spoofchk vf_spoofchk;
1162 struct ifla_vf_link_state vf_linkstate;
1163 struct ifla_vf_rss_query_en vf_rss_query_en;
1164 struct ifla_vf_stats vf_stats;
1165 struct ifla_vf_trust vf_trust;
1166
1167 /*
1168 * Not all SR-IOV capable drivers support the
1169 * spoofcheck and "RSS query enable" query. Preset to
1170 * -1 so the user space tool can detect that the driver
1171 * didn't report anything.
1172 */
1173 ivi.spoofchk = -1;
1174 ivi.rss_query_en = -1;
1175 ivi.trusted = -1;
1176 memset(ivi.mac, 0, sizeof(ivi.mac));
1177 /* The default value for VF link state is "auto"
1178 * IFLA_VF_LINK_STATE_AUTO which equals zero
1179 */
1180 ivi.linkstate = 0;
1181 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
1182 break;
1183 vf_mac.vf =
1184 vf_vlan.vf =
1185 vf_rate.vf =
1186 vf_tx_rate.vf =
1187 vf_spoofchk.vf =
1188 vf_linkstate.vf =
1189 vf_rss_query_en.vf =
1190 vf_trust.vf = ivi.vf;
1191
1192 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1193 vf_vlan.vlan = ivi.vlan;
1194 vf_vlan.qos = ivi.qos;
1195 vf_tx_rate.rate = ivi.max_tx_rate;
1196 vf_rate.min_tx_rate = ivi.min_tx_rate;
1197 vf_rate.max_tx_rate = ivi.max_tx_rate;
1198 vf_spoofchk.setting = ivi.spoofchk;
1199 vf_linkstate.link_state = ivi.linkstate;
1200 vf_rss_query_en.setting = ivi.rss_query_en;
1201 vf_trust.setting = ivi.trusted;
1202 vf = nla_nest_start(skb, IFLA_VF_INFO);
1203 if (!vf) {
1204 nla_nest_cancel(skb, vfinfo);
1205 goto nla_put_failure;
1206 }
1207 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1208 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1209 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1210 &vf_rate) ||
1211 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1212 &vf_tx_rate) ||
1213 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1214 &vf_spoofchk) ||
1215 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1216 &vf_linkstate) ||
1217 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1218 sizeof(vf_rss_query_en),
1219 &vf_rss_query_en) ||
1220 nla_put(skb, IFLA_VF_TRUST,
1221 sizeof(vf_trust), &vf_trust))
1222 goto nla_put_failure; 1276 goto nla_put_failure;
1223 memset(&vf_stats, 0, sizeof(vf_stats));
1224 if (dev->netdev_ops->ndo_get_vf_stats)
1225 dev->netdev_ops->ndo_get_vf_stats(dev, i,
1226 &vf_stats);
1227 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1228 if (!vfstats) {
1229 nla_nest_cancel(skb, vf);
1230 nla_nest_cancel(skb, vfinfo);
1231 goto nla_put_failure;
1232 }
1233 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1234 vf_stats.rx_packets) ||
1235 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1236 vf_stats.tx_packets) ||
1237 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1238 vf_stats.rx_bytes) ||
1239 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1240 vf_stats.tx_bytes) ||
1241 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1242 vf_stats.broadcast) ||
1243 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1244 vf_stats.multicast))
1245 goto nla_put_failure;
1246 nla_nest_end(skb, vfstats);
1247 nla_nest_end(skb, vf);
1248 } 1277 }
1278
1249 nla_nest_end(skb, vfinfo); 1279 nla_nest_end(skb, vfinfo);
1250 } 1280 }
1251 1281
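
Throughout rtnl_fill_vfinfo(), a failed nla_put() has to unwind every nest opened so far (the inner IFLA_VF_STATS or IFLA_VF_INFO nest, then the outer vfinfo list) before the error propagates. Nest start/cancel is essentially save-and-rewind of the message length, modeled here with hypothetical types:

#include <stdio.h>

struct nlmsg { size_t len; size_t cap; };

static size_t nest_start(const struct nlmsg *m) { return m->len; }
static void nest_cancel(struct nlmsg *m, size_t mark) { m->len = mark; }

static int put_attr(struct nlmsg *m, size_t len)
{
        if (m->len + len > m->cap)
                return -1;              /* -EMSGSIZE in the real code */
        m->len += len;
        return 0;
}

int main(void)
{
        struct nlmsg m = { .len = 0, .cap = 32 };
        size_t vfinfo = nest_start(&m);

        if (put_attr(&m, 48)) {          /* attribute does not fit */
                nest_cancel(&m, vfinfo); /* rewind the partial nest */
                printf("cancelled, len=%zu\n", m.len);
        }
        return 0;
}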
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index aa41e6dd6429..152b9c70e252 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4268,7 +4268,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4268 return NULL; 4268 return NULL;
4269 } 4269 }
4270 4270
4271 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 4271 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
4272 2 * ETH_ALEN);
4272 skb->mac_header += VLAN_HLEN; 4273 skb->mac_header += VLAN_HLEN;
4273 return skb; 4274 return skb;
4274} 4275}
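
The memmove above slides the two 6-byte MAC addresses forward over the 4-byte VLAN tag, and by using skb->mac_len instead of a fixed VLAN_ETH_HLEN it stays correct when the MAC header is not the standard Ethernet length. A self-contained demo of the same move on a flat buffer (header bytes invented):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define VLAN_HLEN 4

int main(void)
{
        /* dst MAC, src MAC, 802.1Q tag (0x8100, VID 5), ethertype */
        unsigned char hdr[2 * ETH_ALEN + VLAN_HLEN + 2] =
                "DDDDDDSSSSSS\x81\x00\x00\x05\x08\x00";

        /* regions overlap, hence memmove rather than memcpy */
        memmove(hdr + VLAN_HLEN, hdr, 2 * ETH_ALEN);
        printf("%.12s\n", (char *)hdr + VLAN_HLEN); /* DDDDDDSSSSSS */
        return 0;
}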
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1eba07feb34a..b7448c8490ac 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -21,8 +21,10 @@
21#include <linux/of_mdio.h> 21#include <linux/of_mdio.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/of_net.h> 23#include <linux/of_net.h>
24#include <linux/of_gpio.h>
24#include <linux/sysfs.h> 25#include <linux/sysfs.h>
25#include <linux/phy_fixed.h> 26#include <linux/phy_fixed.h>
27#include <linux/gpio/consumer.h>
26#include "dsa_priv.h" 28#include "dsa_priv.h"
27 29
28char dsa_driver_version[] = "0.1"; 30char dsa_driver_version[] = "0.1";
@@ -688,6 +690,9 @@ static int dsa_of_probe(struct device *dev)
688 const char *port_name; 690 const char *port_name;
689 int chip_index, port_index; 691 int chip_index, port_index;
690 const unsigned int *sw_addr, *port_reg; 692 const unsigned int *sw_addr, *port_reg;
693 int gpio;
694 enum of_gpio_flags of_flags;
695 unsigned long flags;
691 u32 eeprom_len; 696 u32 eeprom_len;
692 int ret; 697 int ret;
693 698
@@ -766,6 +771,19 @@ static int dsa_of_probe(struct device *dev)
766 put_device(cd->host_dev); 771 put_device(cd->host_dev);
767 cd->host_dev = &mdio_bus_switch->dev; 772 cd->host_dev = &mdio_bus_switch->dev;
768 } 773 }
774 gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
775 &of_flags);
776 if (gpio_is_valid(gpio)) {
777 flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
778 GPIOF_ACTIVE_LOW : 0);
779 ret = devm_gpio_request_one(dev, gpio, flags,
780 "switch_reset");
781 if (ret)
782 goto out_free_chip;
783
784 cd->reset = gpio_to_desc(gpio);
785 gpiod_direction_output(cd->reset, 0);
786 }
769 787
770 for_each_available_child_of_node(child, port) { 788 for_each_available_child_of_node(child, port) {
771 port_reg = of_get_property(port, "reg", NULL); 789 port_reg = of_get_property(port, "reg", NULL);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8..46b9c887bede 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
563 int max_retries, thresh; 563 int max_retries, thresh;
564 u8 defer_accept; 564 u8 defer_accept;
565 565
566 if (sk_listener->sk_state != TCP_LISTEN) 566 if (sk_state_load(sk_listener) != TCP_LISTEN)
567 goto drop; 567 goto drop;
568 568
569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
749 * It is OK, because this socket enters to hash table only 749 * It is OK, because this socket enters to hash table only
750 * after validation is complete. 750 * after validation is complete.
751 */ 751 */
752 sk->sk_state = TCP_LISTEN; 752 sk_state_store(sk, TCP_LISTEN);
753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { 753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
754 inet->inet_sport = htons(inet->inet_num); 754 inet->inet_sport = htons(inet->inet_num);
755 755
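
sk_state_load() and sk_state_store() wrap smp_load_acquire() and smp_store_release() on sk->sk_state, so lockless readers such as the SYN-ACK timer above never observe TCP_LISTEN before the listener is fully initialised. A C11-atomics analogue of the same acquire/release pairing:

#include <stdatomic.h>
#include <stdio.h>

#define TCP_LISTEN 10

static _Atomic int sk_state;

/* analogue of sk_state_load(): acquire pairs with the release below */
static int state_load(void)
{
        return atomic_load_explicit(&sk_state, memory_order_acquire);
}

/* analogue of sk_state_store(): publish only after setup is done */
static void state_store(int v)
{
        atomic_store_explicit(&sk_state, v, memory_order_release);
}

int main(void)
{
        state_store(TCP_LISTEN);
        printf("listening=%d\n", state_load() == TCP_LISTEN);
        return 0;
}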
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0bc7412d9e14..67f7c9de0b16 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -65,15 +65,6 @@
65#include <net/checksum.h> 65#include <net/checksum.h>
66#include <asm/processor.h> 66#include <asm/processor.h>
67 67
68/* Define this to allow debugging output */
69#undef IPCONFIG_DEBUG
70
71#ifdef IPCONFIG_DEBUG
72#define DBG(x) printk x
73#else
74#define DBG(x) do { } while(0)
75#endif
76
77#if defined(CONFIG_IP_PNP_DHCP) 68#if defined(CONFIG_IP_PNP_DHCP)
78#define IPCONFIG_DHCP 69#define IPCONFIG_DHCP
79#endif 70#endif
@@ -227,7 +218,7 @@ static int __init ic_open_devs(void)
227 if (dev->mtu >= 364) 218 if (dev->mtu >= 364)
228 able |= IC_BOOTP; 219 able |= IC_BOOTP;
229 else 220 else
230 pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small", 221 pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small\n",
231 dev->name, dev->mtu); 222 dev->name, dev->mtu);
232 if (!(dev->flags & IFF_NOARP)) 223 if (!(dev->flags & IFF_NOARP))
233 able |= IC_RARP; 224 able |= IC_RARP;
@@ -254,8 +245,8 @@ static int __init ic_open_devs(void)
254 else 245 else
255 d->xid = 0; 246 d->xid = 0;
256 ic_proto_have_if |= able; 247 ic_proto_have_if |= able;
257 DBG(("IP-Config: %s UP (able=%d, xid=%08x)\n", 248 pr_debug("IP-Config: %s UP (able=%d, xid=%08x)\n",
258 dev->name, able, d->xid)); 249 dev->name, able, d->xid);
259 } 250 }
260 } 251 }
261 252
@@ -311,7 +302,7 @@ static void __init ic_close_devs(void)
311 next = d->next; 302 next = d->next;
312 dev = d->dev; 303 dev = d->dev;
313 if (dev != ic_dev && !netdev_uses_dsa(dev)) { 304 if (dev != ic_dev && !netdev_uses_dsa(dev)) {
314 DBG(("IP-Config: Downing %s\n", dev->name)); 305 pr_debug("IP-Config: Downing %s\n", dev->name);
315 dev_change_flags(dev, d->flags); 306 dev_change_flags(dev, d->flags);
316 } 307 }
317 kfree(d); 308 kfree(d);
@@ -464,7 +455,8 @@ static int __init ic_defaults(void)
464 &ic_myaddr); 455 &ic_myaddr);
465 return -1; 456 return -1;
466 } 457 }
467 printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); 458 pr_notice("IP-Config: Guessing netmask %pI4\n",
459 &ic_netmask);
468 } 460 }
469 461
470 return 0; 462 return 0;
@@ -675,9 +667,7 @@ ic_dhcp_init_options(u8 *options)
675 u8 *e = options; 667 u8 *e = options;
676 int len; 668 int len;
677 669
678#ifdef IPCONFIG_DEBUG 670 pr_debug("DHCP: Sending message type %d\n", mt);
679 printk("DHCP: Sending message type %d\n", mt);
680#endif
681 671
682 memcpy(e, ic_bootp_cookie, 4); /* RFC1048 Magic Cookie */ 672 memcpy(e, ic_bootp_cookie, 4); /* RFC1048 Magic Cookie */
683 e += 4; 673 e += 4;
@@ -847,7 +837,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
847 else if (dev->type == ARPHRD_FDDI) 837 else if (dev->type == ARPHRD_FDDI)
848 b->htype = ARPHRD_ETHER; 838 b->htype = ARPHRD_ETHER;
849 else { 839 else {
850 printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name); 840 pr_warn("Unknown ARP type 0x%04x for device %s\n", dev->type,
841 dev->name);
851 b->htype = dev->type; /* can cause undefined behavior */ 842 b->htype = dev->type; /* can cause undefined behavior */
852 } 843 }
853 844
@@ -904,14 +895,12 @@ static void __init ic_do_bootp_ext(u8 *ext)
904 int i; 895 int i;
905 __be16 mtu; 896 __be16 mtu;
906 897
907#ifdef IPCONFIG_DEBUG
908 u8 *c; 898 u8 *c;
909 899
910 printk("DHCP/BOOTP: Got extension %d:",*ext); 900 pr_debug("DHCP/BOOTP: Got extension %d:", *ext);
911 for (c=ext+2; c<ext+2+ext[1]; c++) 901 for (c=ext+2; c<ext+2+ext[1]; c++)
912 printk(" %02x", *c); 902 pr_debug(" %02x", *c);
913 printk("\n"); 903 pr_debug("\n");
914#endif
915 904
916 switch (*ext++) { 905 switch (*ext++) {
917 case 1: /* Subnet mask */ 906 case 1: /* Subnet mask */
@@ -1080,9 +1069,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
1080 } 1069 }
1081 } 1070 }
1082 1071
1083#ifdef IPCONFIG_DEBUG 1072 pr_debug("DHCP: Got message type %d\n", mt);
1084 printk("DHCP: Got message type %d\n", mt);
1085#endif
1086 1073
1087 switch (mt) { 1074 switch (mt) {
1088 case DHCPOFFER: 1075 case DHCPOFFER:
@@ -1095,10 +1082,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
1095 /* Let's accept that offer. */ 1082 /* Let's accept that offer. */
1096 ic_myaddr = b->your_ip; 1083 ic_myaddr = b->your_ip;
1097 ic_servaddr = server_id; 1084 ic_servaddr = server_id;
1098#ifdef IPCONFIG_DEBUG 1085 pr_debug("DHCP: Offered address %pI4 by server %pI4\n",
1099 printk("DHCP: Offered address %pI4 by server %pI4\n", 1086 &ic_myaddr, &b->iph.saddr);
1100 &ic_myaddr, &b->iph.saddr);
1101#endif
1102 /* The DHCP indicated server address takes 1087 /* The DHCP indicated server address takes
1103 * precedence over the bootp header one if 1088 * precedence over the bootp header one if
1104 * they are different. 1089 * they are different.
@@ -1295,11 +1280,10 @@ static int __init ic_dynamic(void)
1295 return -1; 1280 return -1;
1296 } 1281 }
1297 1282
1298 printk("IP-Config: Got %s answer from %pI4, ", 1283 pr_info("IP-Config: Got %s answer from %pI4, my address is %pI4\n",
1299 ((ic_got_reply & IC_RARP) ? "RARP" 1284 ((ic_got_reply & IC_RARP) ? "RARP"
1300 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), 1285 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
1301 &ic_addrservaddr); 1286 &ic_addrservaddr, &ic_myaddr);
1302 pr_cont("my address is %pI4\n", &ic_myaddr);
1303 1287
1304 return 0; 1288 return 0;
1305} 1289}
@@ -1426,7 +1410,7 @@ static int __init ip_auto_config(void)
1426 if (!ic_enable) 1410 if (!ic_enable)
1427 return 0; 1411 return 0;
1428 1412
1429 DBG(("IP-Config: Entered.\n")); 1413 pr_debug("IP-Config: Entered.\n");
1430#ifdef IPCONFIG_DYNAMIC 1414#ifdef IPCONFIG_DYNAMIC
1431 try_try_again: 1415 try_try_again:
1432#endif 1416#endif
@@ -1542,7 +1526,7 @@ static int __init ip_auto_config(void)
1542 pr_cont(", mtu=%d", ic_dev_mtu); 1526 pr_cont(", mtu=%d", ic_dev_mtu);
1543 for (i = 0; i < CONF_NAMESERVERS_MAX; i++) 1527 for (i = 0; i < CONF_NAMESERVERS_MAX; i++)
1544 if (ic_nameservers[i] != NONE) { 1528 if (ic_nameservers[i] != NONE) {
1545 pr_info(" nameserver%u=%pI4", 1529 pr_cont(" nameserver%u=%pI4",
1546 i, &ic_nameservers[i]); 1530 i, &ic_nameservers[i]);
1547 break; 1531 break;
1548 } 1532 }
@@ -1585,7 +1569,7 @@ static int __init ic_proto_name(char *name)
1585 return 1; 1569 return 1;
1586 *v = 0; 1570 *v = 0;
1587 if (kstrtou8(client_id, 0, dhcp_client_identifier)) 1571 if (kstrtou8(client_id, 0, dhcp_client_identifier))
1588 DBG("DHCP: Invalid client identifier type\n"); 1572 pr_debug("DHCP: Invalid client identifier type\n");
1589 strncpy(dhcp_client_identifier + 1, v + 1, 251); 1573 strncpy(dhcp_client_identifier + 1, v + 1, 251);
1590 *v = ','; 1574 *v = ',';
1591 } 1575 }
@@ -1644,7 +1628,7 @@ static int __init ip_auto_config_setup(char *addrs)
1644 if ((cp = strchr(ip, ':'))) 1628 if ((cp = strchr(ip, ':')))
1645 *cp++ = '\0'; 1629 *cp++ = '\0';
1646 if (strlen(ip) > 0) { 1630 if (strlen(ip) > 0) {
1647 DBG(("IP-Config: Parameter #%d: `%s'\n", num, ip)); 1631 pr_debug("IP-Config: Parameter #%d: `%s'\n", num, ip);
1648 switch (num) { 1632 switch (num) {
1649 case 0: 1633 case 0:
1650 if ((ic_myaddr = in_aton(ip)) == ANY) 1634 if ((ic_myaddr = in_aton(ip)) == ANY)
@@ -1716,7 +1700,7 @@ static int __init vendor_class_identifier_setup(char *addrs)
1716 if (strlcpy(vendor_class_identifier, addrs, 1700 if (strlcpy(vendor_class_identifier, addrs,
1717 sizeof(vendor_class_identifier)) 1701 sizeof(vendor_class_identifier))
1718 >= sizeof(vendor_class_identifier)) 1702 >= sizeof(vendor_class_identifier))
1719 pr_warn("DHCP: vendorclass too long, truncated to \"%s\"", 1703 pr_warn("DHCP: vendorclass too long, truncated to \"%s\"\n",
1720 vendor_class_identifier); 1704 vendor_class_identifier);
1721 return 1; 1705 return 1;
1722} 1706}
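
The conversion above retires the file-private IPCONFIG_DEBUG/DBG() switch in favor of pr_debug(), which compiles to nothing unless DEBUG is defined for the file (or CONFIG_DYNAMIC_DEBUG enables the site at run time). A userspace model of that compile-out behavior:

#include <stdio.h>

/* pr_debug()-style macro: active only when built with -DDEBUG,
 * mirroring what the removed hand-rolled DBG() did
 */
#ifdef DEBUG
#define pr_debug(...) fprintf(stderr, __VA_ARGS__)
#else
#define pr_debug(...) do { } while (0)
#endif

int main(void)
{
        pr_debug("IP-Config: %s UP\n", "eth0"); /* silent without -DDEBUG */
        printf("done\n");
        return 0;
}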
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 92dd4b74d513..a2d248d9c35c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -67,10 +67,6 @@
67#include <net/fib_rules.h> 67#include <net/fib_rules.h>
68#include <linux/netconf.h> 68#include <linux/netconf.h>
69 69
70#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
71#define CONFIG_IP_PIMSM 1
72#endif
73
74struct mr_table { 70struct mr_table {
75 struct list_head list; 71 struct list_head list;
76 possible_net_t net; 72 possible_net_t net;
@@ -84,9 +80,7 @@ struct mr_table {
84 atomic_t cache_resolve_queue_len; 80 atomic_t cache_resolve_queue_len;
85 bool mroute_do_assert; 81 bool mroute_do_assert;
86 bool mroute_do_pim; 82 bool mroute_do_pim;
87#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
88 int mroute_reg_vif_num; 83 int mroute_reg_vif_num;
89#endif
90}; 84};
91 85
92struct ipmr_rule { 86struct ipmr_rule {
@@ -97,15 +91,18 @@ struct ipmr_result {
97 struct mr_table *mrt; 91 struct mr_table *mrt;
98}; 92};
99 93
94static inline bool pimsm_enabled(void)
95{
96 return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
97}
98
100/* Big lock, protecting vif table, mrt cache and mroute socket state. 99/* Big lock, protecting vif table, mrt cache and mroute socket state.
101 * Note that the changes are semaphored via rtnl_lock. 100 * Note that the changes are semaphored via rtnl_lock.
102 */ 101 */
103 102
104static DEFINE_RWLOCK(mrt_lock); 103static DEFINE_RWLOCK(mrt_lock);
105 104
106/* 105/* Multicast router control variables */
107 * Multicast router control variables
108 */
109 106
110#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) 107#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
111 108
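
pimsm_enabled() relies on IS_BUILTIN(), which turns a Kconfig symbol into a compile-time 0 or 1, so the PIM-only paths become ordinary dead code the compiler discards instead of #ifdef blocks. The macro chain below is condensed from include/linux/kconfig.h and runs standalone:

#include <stdio.h>

#define CONFIG_IP_PIMSM_V2 1    /* pretend only V2 is built in */

/* Kconfig defines CONFIG_FOO to 1 when built in and leaves it
 * undefined otherwise; the placeholder trick maps that to 1/0
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_BUILTIN(option) __is_defined(option)

int main(void)
{
        printf("V1=%d V2=%d\n",
               IS_BUILTIN(CONFIG_IP_PIMSM_V1),  /* 0: not configured */
               IS_BUILTIN(CONFIG_IP_PIMSM_V2)); /* 1 */
        return 0;
}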
@@ -252,8 +249,8 @@ static int __net_init ipmr_rules_init(struct net *net)
252 INIT_LIST_HEAD(&net->ipv4.mr_tables); 249 INIT_LIST_HEAD(&net->ipv4.mr_tables);
253 250
254 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 251 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
255 if (!mrt) { 252 if (IS_ERR(mrt)) {
256 err = -ENOMEM; 253 err = PTR_ERR(mrt);
257 goto err1; 254 goto err1;
258 } 255 }
259 256
@@ -301,8 +298,13 @@ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
301 298
302static int __net_init ipmr_rules_init(struct net *net) 299static int __net_init ipmr_rules_init(struct net *net)
303{ 300{
304 net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 301 struct mr_table *mrt;
305 return net->ipv4.mrt ? 0 : -ENOMEM; 302
303 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
304 if (IS_ERR(mrt))
305 return PTR_ERR(mrt);
306 net->ipv4.mrt = mrt;
307 return 0;
306} 308}
307 309
308static void __net_exit ipmr_rules_exit(struct net *net) 310static void __net_exit ipmr_rules_exit(struct net *net)
@@ -319,13 +321,17 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
319 struct mr_table *mrt; 321 struct mr_table *mrt;
320 unsigned int i; 322 unsigned int i;
321 323
324 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
325 if (id != RT_TABLE_DEFAULT && id >= 1000000000)
326 return ERR_PTR(-EINVAL);
327
322 mrt = ipmr_get_table(net, id); 328 mrt = ipmr_get_table(net, id);
323 if (mrt) 329 if (mrt)
324 return mrt; 330 return mrt;
325 331
326 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); 332 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
327 if (!mrt) 333 if (!mrt)
328 return NULL; 334 return ERR_PTR(-ENOMEM);
329 write_pnet(&mrt->net, net); 335 write_pnet(&mrt->net, net);
330 mrt->id = id; 336 mrt->id = id;
331 337
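
ipmr_new_table() now tells its callers why it failed, returning ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM) rather than a bare NULL; callers test with IS_ERR() and recover the errno with PTR_ERR(), as the ipmr_rules_init() hunks above show. A freestanding sketch of the encoding (constants simplified from the kernel's err.h):

#include <stdio.h>

#define MAX_ERRNO 4095
#define EINVAL    22

/* errnos live in the top, never-mappable page of the address space,
 * so one return value can carry either a valid pointer or an error
 */
static void *ERR_PTR(long error)   { return (void *)error; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int default_table;

static void *new_table(int id)
{
        if (id < 0)
                return ERR_PTR(-EINVAL);    /* reject bogus ids */
        return &default_table;
}

int main(void)
{
        void *t = new_table(-1);

        if (IS_ERR(t))
                printf("error %ld\n", PTR_ERR(t)); /* error -22 */
        return 0;
}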
@@ -338,9 +344,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
338 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, 344 setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
339 (unsigned long)mrt); 345 (unsigned long)mrt);
340 346
341#ifdef CONFIG_IP_PIMSM
342 mrt->mroute_reg_vif_num = -1; 347 mrt->mroute_reg_vif_num = -1;
343#endif
344#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 348#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
345 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); 349 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
346#endif 350#endif
@@ -387,8 +391,24 @@ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
387 } 391 }
388} 392}
389 393
390static 394/* Initialize ipmr pimreg/tunnel in_device */
391struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) 395static bool ipmr_init_vif_indev(const struct net_device *dev)
396{
397 struct in_device *in_dev;
398
399 ASSERT_RTNL();
400
401 in_dev = __in_dev_get_rtnl(dev);
402 if (!in_dev)
403 return false;
404 ipv4_devconf_setall(in_dev);
405 neigh_parms_data_state_setall(in_dev->arp_parms);
406 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
407
408 return true;
409}
410
411static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
392{ 412{
393 struct net_device *dev; 413 struct net_device *dev;
394 414
@@ -399,7 +419,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
399 int err; 419 int err;
400 struct ifreq ifr; 420 struct ifreq ifr;
401 struct ip_tunnel_parm p; 421 struct ip_tunnel_parm p;
402 struct in_device *in_dev;
403 422
404 memset(&p, 0, sizeof(p)); 423 memset(&p, 0, sizeof(p));
405 p.iph.daddr = v->vifc_rmt_addr.s_addr; 424 p.iph.daddr = v->vifc_rmt_addr.s_addr;
@@ -424,15 +443,8 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
424 if (err == 0 && 443 if (err == 0 &&
425 (dev = __dev_get_by_name(net, p.name)) != NULL) { 444 (dev = __dev_get_by_name(net, p.name)) != NULL) {
426 dev->flags |= IFF_MULTICAST; 445 dev->flags |= IFF_MULTICAST;
427 446 if (!ipmr_init_vif_indev(dev))
428 in_dev = __in_dev_get_rtnl(dev);
429 if (!in_dev)
430 goto failure; 447 goto failure;
431
432 ipv4_devconf_setall(in_dev);
433 neigh_parms_data_state_setall(in_dev->arp_parms);
434 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
435
436 if (dev_open(dev)) 448 if (dev_open(dev))
437 goto failure; 449 goto failure;
438 dev_hold(dev); 450 dev_hold(dev);
@@ -449,8 +461,7 @@ failure:
449 return NULL; 461 return NULL;
450} 462}
451 463
452#ifdef CONFIG_IP_PIMSM 464#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
453
454static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) 465static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
455{ 466{
456 struct net *net = dev_net(dev); 467 struct net *net = dev_net(dev);
@@ -500,7 +511,6 @@ static void reg_vif_setup(struct net_device *dev)
500static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) 511static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
501{ 512{
502 struct net_device *dev; 513 struct net_device *dev;
503 struct in_device *in_dev;
504 char name[IFNAMSIZ]; 514 char name[IFNAMSIZ];
505 515
506 if (mrt->id == RT_TABLE_DEFAULT) 516 if (mrt->id == RT_TABLE_DEFAULT)
@@ -520,18 +530,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
520 return NULL; 530 return NULL;
521 } 531 }
522 532
523 rcu_read_lock(); 533 if (!ipmr_init_vif_indev(dev))
524 in_dev = __in_dev_get_rcu(dev);
525 if (!in_dev) {
526 rcu_read_unlock();
527 goto failure; 534 goto failure;
528 }
529
530 ipv4_devconf_setall(in_dev);
531 neigh_parms_data_state_setall(in_dev->arp_parms);
532 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
533 rcu_read_unlock();
534
535 if (dev_open(dev)) 535 if (dev_open(dev))
536 goto failure; 536 goto failure;
537 537
@@ -547,13 +547,56 @@ failure:
547 unregister_netdevice(dev); 547 unregister_netdevice(dev);
548 return NULL; 548 return NULL;
549} 549}
550
551/* called with rcu_read_lock() */
552static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
553 unsigned int pimlen)
554{
555 struct net_device *reg_dev = NULL;
556 struct iphdr *encap;
557
558 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
559 /* Check that:
560 * a. packet is really sent to a multicast group
561 * b. packet is not a NULL-REGISTER
562 * c. packet is not truncated
563 */
564 if (!ipv4_is_multicast(encap->daddr) ||
565 encap->tot_len == 0 ||
566 ntohs(encap->tot_len) + pimlen > skb->len)
567 return 1;
568
569 read_lock(&mrt_lock);
570 if (mrt->mroute_reg_vif_num >= 0)
571 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
572 read_unlock(&mrt_lock);
573
574 if (!reg_dev)
575 return 1;
576
577 skb->mac_header = skb->network_header;
578 skb_pull(skb, (u8 *)encap - skb->data);
579 skb_reset_network_header(skb);
580 skb->protocol = htons(ETH_P_IP);
581 skb->ip_summed = CHECKSUM_NONE;
582
583 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
584
585 netif_rx(skb);
586
587 return NET_RX_SUCCESS;
588}
589#else
590static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
591{
592 return NULL;
593}
550#endif 594#endif
551 595
552/** 596/**
553 * vif_delete - Delete a VIF entry 597 * vif_delete - Delete a VIF entry
554 * @notify: Set to 1, if the caller is a notifier_call 598 * @notify: Set to 1, if the caller is a notifier_call
555 */ 599 */
556
557static int vif_delete(struct mr_table *mrt, int vifi, int notify, 600static int vif_delete(struct mr_table *mrt, int vifi, int notify,
558 struct list_head *head) 601 struct list_head *head)
559{ 602{
@@ -575,10 +618,8 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
575 return -EADDRNOTAVAIL; 618 return -EADDRNOTAVAIL;
576 } 619 }
577 620
578#ifdef CONFIG_IP_PIMSM
579 if (vifi == mrt->mroute_reg_vif_num) 621 if (vifi == mrt->mroute_reg_vif_num)
580 mrt->mroute_reg_vif_num = -1; 622 mrt->mroute_reg_vif_num = -1;
581#endif
582 623
583 if (vifi + 1 == mrt->maxvif) { 624 if (vifi + 1 == mrt->maxvif) {
584 int tmp; 625 int tmp;
@@ -625,7 +666,6 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
625/* Destroy an unresolved cache entry, killing queued skbs 666/* Destroy an unresolved cache entry, killing queued skbs
626 * and reporting error to netlink readers. 667 * and reporting error to netlink readers.
627 */ 668 */
628
629static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) 669static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
630{ 670{
631 struct net *net = read_pnet(&mrt->net); 671 struct net *net = read_pnet(&mrt->net);
@@ -653,9 +693,7 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
653 ipmr_cache_free(c); 693 ipmr_cache_free(c);
654} 694}
655 695
656
657/* Timer process for the unresolved queue. */ 696/* Timer process for the unresolved queue. */
658
659static void ipmr_expire_process(unsigned long arg) 697static void ipmr_expire_process(unsigned long arg)
660{ 698{
661 struct mr_table *mrt = (struct mr_table *)arg; 699 struct mr_table *mrt = (struct mr_table *)arg;
@@ -695,7 +733,6 @@ out:
695} 733}
696 734
697/* Fill oifs list. It is called under write locked mrt_lock. */ 735/* Fill oifs list. It is called under write locked mrt_lock. */
698
699static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, 736static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
700 unsigned char *ttls) 737 unsigned char *ttls)
701{ 738{
@@ -731,10 +768,10 @@ static int vif_add(struct net *net, struct mr_table *mrt,
731 return -EADDRINUSE; 768 return -EADDRINUSE;
732 769
733 switch (vifc->vifc_flags) { 770 switch (vifc->vifc_flags) {
734#ifdef CONFIG_IP_PIMSM
735 case VIFF_REGISTER: 771 case VIFF_REGISTER:
736 /* 772 if (!pimsm_enabled())
737 * Special Purpose VIF in PIM 773 return -EINVAL;
774 /* Special Purpose VIF in PIM
738 * All the packets will be sent to the daemon 775 * All the packets will be sent to the daemon
739 */ 776 */
740 if (mrt->mroute_reg_vif_num >= 0) 777 if (mrt->mroute_reg_vif_num >= 0)
@@ -749,7 +786,6 @@ static int vif_add(struct net *net, struct mr_table *mrt,
749 return err; 786 return err;
750 } 787 }
751 break; 788 break;
752#endif
753 case VIFF_TUNNEL: 789 case VIFF_TUNNEL:
754 dev = ipmr_new_tunnel(net, vifc); 790 dev = ipmr_new_tunnel(net, vifc);
755 if (!dev) 791 if (!dev)
@@ -761,7 +797,6 @@ static int vif_add(struct net *net, struct mr_table *mrt,
761 return err; 797 return err;
762 } 798 }
763 break; 799 break;
764
765 case VIFF_USE_IFINDEX: 800 case VIFF_USE_IFINDEX:
766 case 0: 801 case 0:
767 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 802 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
@@ -815,10 +850,8 @@ static int vif_add(struct net *net, struct mr_table *mrt,
815 /* And finish update writing critical data */ 850 /* And finish update writing critical data */
816 write_lock_bh(&mrt_lock); 851 write_lock_bh(&mrt_lock);
817 v->dev = dev; 852 v->dev = dev;
818#ifdef CONFIG_IP_PIMSM
819 if (v->flags & VIFF_REGISTER) 853 if (v->flags & VIFF_REGISTER)
820 mrt->mroute_reg_vif_num = vifi; 854 mrt->mroute_reg_vif_num = vifi;
821#endif
822 if (vifi+1 > mrt->maxvif) 855 if (vifi+1 > mrt->maxvif)
823 mrt->maxvif = vifi+1; 856 mrt->maxvif = vifi+1;
824 write_unlock_bh(&mrt_lock); 857 write_unlock_bh(&mrt_lock);
@@ -883,9 +916,7 @@ skip:
883 return ipmr_cache_find_any_parent(mrt, vifi); 916 return ipmr_cache_find_any_parent(mrt, vifi);
884} 917}
885 918
886/* 919/* Allocate a multicast cache entry */
887 * Allocate a multicast cache entry
888 */
889static struct mfc_cache *ipmr_cache_alloc(void) 920static struct mfc_cache *ipmr_cache_alloc(void)
890{ 921{
891 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 922 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
@@ -906,10 +937,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
906 return c; 937 return c;
907} 938}
908 939
909/* 940/* A cache entry has gone into a resolved state from queued */
910 * A cache entry has gone into a resolved state from queued
911 */
912
913static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, 941static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
914 struct mfc_cache *uc, struct mfc_cache *c) 942 struct mfc_cache *uc, struct mfc_cache *c)
915{ 943{
@@ -917,7 +945,6 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
917 struct nlmsgerr *e; 945 struct nlmsgerr *e;
918 946
919 /* Play the pending entries through our router */ 947 /* Play the pending entries through our router */
920
921 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { 948 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
922 if (ip_hdr(skb)->version == 0) { 949 if (ip_hdr(skb)->version == 0) {
923 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 950 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
@@ -941,34 +968,29 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
941 } 968 }
942} 969}
943 970
944/* 971/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
945 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted 972 * expects the following bizarre scheme.
946 * expects the following bizarre scheme.
947 * 973 *
948 * Called under mrt_lock. 974 * Called under mrt_lock.
949 */ 975 */
950
951static int ipmr_cache_report(struct mr_table *mrt, 976static int ipmr_cache_report(struct mr_table *mrt,
952 struct sk_buff *pkt, vifi_t vifi, int assert) 977 struct sk_buff *pkt, vifi_t vifi, int assert)
953{ 978{
954 struct sk_buff *skb;
955 const int ihl = ip_hdrlen(pkt); 979 const int ihl = ip_hdrlen(pkt);
980 struct sock *mroute_sk;
956 struct igmphdr *igmp; 981 struct igmphdr *igmp;
957 struct igmpmsg *msg; 982 struct igmpmsg *msg;
958 struct sock *mroute_sk; 983 struct sk_buff *skb;
959 int ret; 984 int ret;
960 985
961#ifdef CONFIG_IP_PIMSM
962 if (assert == IGMPMSG_WHOLEPKT) 986 if (assert == IGMPMSG_WHOLEPKT)
963 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); 987 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
964 else 988 else
965#endif
966 skb = alloc_skb(128, GFP_ATOMIC); 989 skb = alloc_skb(128, GFP_ATOMIC);
967 990
968 if (!skb) 991 if (!skb)
969 return -ENOBUFS; 992 return -ENOBUFS;
970 993
971#ifdef CONFIG_IP_PIMSM
972 if (assert == IGMPMSG_WHOLEPKT) { 994 if (assert == IGMPMSG_WHOLEPKT) {
973 /* Ugly, but we have no choice with this interface. 995 /* Ugly, but we have no choice with this interface.
974 * Duplicate old header, fix ihl, length etc. 996 * Duplicate old header, fix ihl, length etc.
@@ -986,28 +1008,23 @@ static int ipmr_cache_report(struct mr_table *mrt,
986 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; 1008 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
987 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + 1009 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
988 sizeof(struct iphdr)); 1010 sizeof(struct iphdr));
989 } else 1011 } else {
990#endif 1012 /* Copy the IP header */
991 { 1013 skb_set_network_header(skb, skb->len);
992 1014 skb_put(skb, ihl);
993 /* Copy the IP header */ 1015 skb_copy_to_linear_data(skb, pkt->data, ihl);
994 1016 /* Flag to the kernel this is a route add */
995 skb_set_network_header(skb, skb->len); 1017 ip_hdr(skb)->protocol = 0;
996 skb_put(skb, ihl); 1018 msg = (struct igmpmsg *)skb_network_header(skb);
997 skb_copy_to_linear_data(skb, pkt->data, ihl); 1019 msg->im_vif = vifi;
998 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ 1020 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
999 msg = (struct igmpmsg *)skb_network_header(skb); 1021 /* Add our header */
1000 msg->im_vif = vifi; 1022 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
1001 skb_dst_set(skb, dst_clone(skb_dst(pkt))); 1023 igmp->type = assert;
1002 1024 msg->im_msgtype = assert;
1003 /* Add our header */ 1025 igmp->code = 0;
1004 1026 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
1005 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 1027 skb->transport_header = skb->network_header;
1006 igmp->type =
1007 msg->im_msgtype = assert;
1008 igmp->code = 0;
1009 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
1010 skb->transport_header = skb->network_header;
1011 } 1028 }
1012 1029
1013 rcu_read_lock(); 1030 rcu_read_lock();
@@ -1019,7 +1036,6 @@ static int ipmr_cache_report(struct mr_table *mrt,
1019 } 1036 }
1020 1037
1021 /* Deliver to mrouted */ 1038 /* Deliver to mrouted */
1022
1023 ret = sock_queue_rcv_skb(mroute_sk, skb); 1039 ret = sock_queue_rcv_skb(mroute_sk, skb);
1024 rcu_read_unlock(); 1040 rcu_read_unlock();
1025 if (ret < 0) { 1041 if (ret < 0) {
@@ -1030,12 +1046,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
1030 return ret; 1046 return ret;
1031} 1047}
1032 1048
1033/* 1049/* Queue a packet for resolution. It gets locked cache entry! */
1034 * Queue a packet for resolution. It gets locked cache entry! 1050static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1035 */ 1051 struct sk_buff *skb)
1036
1037static int
1038ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1039{ 1052{
1040 bool found = false; 1053 bool found = false;
1041 int err; 1054 int err;
@@ -1053,7 +1066,6 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1053 1066
1054 if (!found) { 1067 if (!found) {
1055 /* Create a new entry if allowable */ 1068 /* Create a new entry if allowable */
1056
1057 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || 1069 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1058 (c = ipmr_cache_alloc_unres()) == NULL) { 1070 (c = ipmr_cache_alloc_unres()) == NULL) {
1059 spin_unlock_bh(&mfc_unres_lock); 1071 spin_unlock_bh(&mfc_unres_lock);
@@ -1063,13 +1075,11 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1063 } 1075 }
1064 1076
1065 /* Fill in the new cache entry */ 1077 /* Fill in the new cache entry */
1066
1067 c->mfc_parent = -1; 1078 c->mfc_parent = -1;
1068 c->mfc_origin = iph->saddr; 1079 c->mfc_origin = iph->saddr;
1069 c->mfc_mcastgrp = iph->daddr; 1080 c->mfc_mcastgrp = iph->daddr;
1070 1081
1071 /* Reflect first query at mrouted. */ 1082 /* Reflect first query at mrouted. */
1072
1073 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); 1083 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1074 if (err < 0) { 1084 if (err < 0) {
1075 /* If the report failed throw the cache entry 1085 /* If the report failed throw the cache entry
@@ -1091,7 +1101,6 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1091 } 1101 }
1092 1102
1093 /* See if we can append the packet */ 1103 /* See if we can append the packet */
1094
1095 if (c->mfc_un.unres.unresolved.qlen > 3) { 1104 if (c->mfc_un.unres.unresolved.qlen > 3) {
1096 kfree_skb(skb); 1105 kfree_skb(skb);
1097 err = -ENOBUFS; 1106 err = -ENOBUFS;
@@ -1104,9 +1113,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
1104 return err; 1113 return err;
1105} 1114}
1106 1115
1107/* 1116/* MFC cache manipulation by user space mroute daemon */
1108 * MFC cache manipulation by user space mroute daemon
1109 */
1110 1117
1111static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent) 1118static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1112{ 1119{
@@ -1177,9 +1184,8 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1177 1184
1178 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); 1185 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
1179 1186
1180 /* 1187 /* Check to see if we resolved a queued list. If so we
1181 * Check to see if we resolved a queued list. If so we 1188 * need to send on the frames and tidy up.
1182 * need to send on the frames and tidy up.
1183 */ 1189 */
1184 found = false; 1190 found = false;
1185 spin_lock_bh(&mfc_unres_lock); 1191 spin_lock_bh(&mfc_unres_lock);
@@ -1204,10 +1210,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1204 return 0; 1210 return 0;
1205} 1211}
1206 1212
1207/* 1213/* Close the multicast socket, and clear the vif tables etc */
1208 * Close the multicast socket, and clear the vif tables etc
1209 */
1210
1211static void mroute_clean_tables(struct mr_table *mrt) 1214static void mroute_clean_tables(struct mr_table *mrt)
1212{ 1215{
1213 int i; 1216 int i;
@@ -1215,7 +1218,6 @@ static void mroute_clean_tables(struct mr_table *mrt)
1215 struct mfc_cache *c, *next; 1218 struct mfc_cache *c, *next;
1216 1219
1217 /* Shut down all active vif entries */ 1220 /* Shut down all active vif entries */
1218
1219 for (i = 0; i < mrt->maxvif; i++) { 1221 for (i = 0; i < mrt->maxvif; i++) {
1220 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1222 if (!(mrt->vif_table[i].flags & VIFF_STATIC))
1221 vif_delete(mrt, i, 0, &list); 1223 vif_delete(mrt, i, 0, &list);
@@ -1223,7 +1225,6 @@ static void mroute_clean_tables(struct mr_table *mrt)
1223 unregister_netdevice_many(&list); 1225 unregister_netdevice_many(&list);
1224 1226
1225 /* Wipe the cache */ 1227 /* Wipe the cache */
1226
1227 for (i = 0; i < MFC_LINES; i++) { 1228 for (i = 0; i < MFC_LINES; i++) {
1228 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1229 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1229 if (c->mfc_flags & MFC_STATIC) 1230 if (c->mfc_flags & MFC_STATIC)
@@ -1267,45 +1268,51 @@ static void mrtsock_destruct(struct sock *sk)
1267 rtnl_unlock(); 1268 rtnl_unlock();
1268} 1269}
1269 1270
1270/* 1271/* Socket options and virtual interface manipulation. The whole
1271 * Socket options and virtual interface manipulation. The whole 1272 * virtual interface system is a complete heap, but unfortunately
1272 * virtual interface system is a complete heap, but unfortunately 1273 * that's how BSD mrouted happens to think. Maybe one day with a proper
1273 * that's how BSD mrouted happens to think. Maybe one day with a proper 1274 * MOSPF/PIM router set up we can clean this up.
1274 * MOSPF/PIM router set up we can clean this up.
1275 */ 1275 */
1276 1276
1277int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) 1277int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1278 unsigned int optlen)
1278{ 1279{
1279 int ret, parent = 0;
1280 struct vifctl vif;
1281 struct mfcctl mfc;
1282 struct net *net = sock_net(sk); 1280 struct net *net = sock_net(sk);
1281 int val, ret = 0, parent = 0;
1283 struct mr_table *mrt; 1282 struct mr_table *mrt;
1283 struct vifctl vif;
1284 struct mfcctl mfc;
1285 u32 uval;
1284 1286
1287 /* There's one exception to the lock - MRT_DONE which needs to unlock */
1288 rtnl_lock();
1285 if (sk->sk_type != SOCK_RAW || 1289 if (sk->sk_type != SOCK_RAW ||
1286 inet_sk(sk)->inet_num != IPPROTO_IGMP) 1290 inet_sk(sk)->inet_num != IPPROTO_IGMP) {
1287 return -EOPNOTSUPP; 1291 ret = -EOPNOTSUPP;
1292 goto out_unlock;
1293 }
1288 1294
1289 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1295 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1290 if (!mrt) 1296 if (!mrt) {
1291 return -ENOENT; 1297 ret = -ENOENT;
1292 1298 goto out_unlock;
1299 }
1293 if (optname != MRT_INIT) { 1300 if (optname != MRT_INIT) {
1294 if (sk != rcu_access_pointer(mrt->mroute_sk) && 1301 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1295 !ns_capable(net->user_ns, CAP_NET_ADMIN)) 1302 !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1296 return -EACCES; 1303 ret = -EACCES;
1304 goto out_unlock;
1305 }
1297 } 1306 }
1298 1307
1299 switch (optname) { 1308 switch (optname) {
1300 case MRT_INIT: 1309 case MRT_INIT:
1301 if (optlen != sizeof(int)) 1310 if (optlen != sizeof(int))
1302 return -EINVAL; 1311 ret = -EINVAL;
1303 1312 if (rtnl_dereference(mrt->mroute_sk))
1304 rtnl_lock(); 1313 ret = -EADDRINUSE;
1305 if (rtnl_dereference(mrt->mroute_sk)) { 1314 if (ret)
1306 rtnl_unlock(); 1315 break;
1307 return -EADDRINUSE;
1308 }
1309 1316
1310 ret = ip_ra_control(sk, 1, mrtsock_destruct); 1317 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1311 if (ret == 0) { 1318 if (ret == 0) {
@@ -1315,129 +1322,133 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1315 NETCONFA_IFINDEX_ALL, 1322 NETCONFA_IFINDEX_ALL,
1316 net->ipv4.devconf_all); 1323 net->ipv4.devconf_all);
1317 } 1324 }
1318 rtnl_unlock(); 1325 break;
1319 return ret;
1320 case MRT_DONE: 1326 case MRT_DONE:
1321 if (sk != rcu_access_pointer(mrt->mroute_sk)) 1327 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1322 return -EACCES; 1328 ret = -EACCES;
1323 return ip_ra_control(sk, 0, NULL); 1329 } else {
1330 /* We need to unlock here because mrtsock_destruct takes
1331 * care of rtnl itself and we can't change that due to
1332 * the IP_ROUTER_ALERT setsockopt which runs without it.
1333 */
1334 rtnl_unlock();
1335 ret = ip_ra_control(sk, 0, NULL);
1336 goto out;
1337 }
1338 break;
1324 case MRT_ADD_VIF: 1339 case MRT_ADD_VIF:
1325 case MRT_DEL_VIF: 1340 case MRT_DEL_VIF:
1326 if (optlen != sizeof(vif)) 1341 if (optlen != sizeof(vif)) {
1327 return -EINVAL; 1342 ret = -EINVAL;
1328 if (copy_from_user(&vif, optval, sizeof(vif))) 1343 break;
1329 return -EFAULT; 1344 }
1330 if (vif.vifc_vifi >= MAXVIFS) 1345 if (copy_from_user(&vif, optval, sizeof(vif))) {
1331 return -ENFILE; 1346 ret = -EFAULT;
1332 rtnl_lock(); 1347 break;
1348 }
1349 if (vif.vifc_vifi >= MAXVIFS) {
1350 ret = -ENFILE;
1351 break;
1352 }
1333 if (optname == MRT_ADD_VIF) { 1353 if (optname == MRT_ADD_VIF) {
1334 ret = vif_add(net, mrt, &vif, 1354 ret = vif_add(net, mrt, &vif,
1335 sk == rtnl_dereference(mrt->mroute_sk)); 1355 sk == rtnl_dereference(mrt->mroute_sk));
1336 } else { 1356 } else {
1337 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); 1357 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1338 } 1358 }
1339 rtnl_unlock(); 1359 break;
1340 return ret; 1360 /* Manipulate the forwarding caches. These live
1341 1361 * in a sort of kernel/user symbiosis.
1342 /* 1362 */
1343 * Manipulate the forwarding caches. These live
1344 * in a sort of kernel/user symbiosis.
1345 */
1346 case MRT_ADD_MFC: 1363 case MRT_ADD_MFC:
1347 case MRT_DEL_MFC: 1364 case MRT_DEL_MFC:
1348 parent = -1; 1365 parent = -1;
1349 case MRT_ADD_MFC_PROXY: 1366 case MRT_ADD_MFC_PROXY:
1350 case MRT_DEL_MFC_PROXY: 1367 case MRT_DEL_MFC_PROXY:
1351 if (optlen != sizeof(mfc)) 1368 if (optlen != sizeof(mfc)) {
1352 return -EINVAL; 1369 ret = -EINVAL;
1353 if (copy_from_user(&mfc, optval, sizeof(mfc))) 1370 break;
1354 return -EFAULT; 1371 }
1372 if (copy_from_user(&mfc, optval, sizeof(mfc))) {
1373 ret = -EFAULT;
1374 break;
1375 }
1355 if (parent == 0) 1376 if (parent == 0)
1356 parent = mfc.mfcc_parent; 1377 parent = mfc.mfcc_parent;
1357 rtnl_lock();
1358 if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY) 1378 if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
1359 ret = ipmr_mfc_delete(mrt, &mfc, parent); 1379 ret = ipmr_mfc_delete(mrt, &mfc, parent);
1360 else 1380 else
1361 ret = ipmr_mfc_add(net, mrt, &mfc, 1381 ret = ipmr_mfc_add(net, mrt, &mfc,
1362 sk == rtnl_dereference(mrt->mroute_sk), 1382 sk == rtnl_dereference(mrt->mroute_sk),
1363 parent); 1383 parent);
1364 rtnl_unlock(); 1384 break;
1365 return ret; 1385 /* Control PIM assert. */
1366 /*
1367 * Control PIM assert.
1368 */
1369 case MRT_ASSERT: 1386 case MRT_ASSERT:
1370 { 1387 if (optlen != sizeof(val)) {
1371 int v; 1388 ret = -EINVAL;
1372 if (optlen != sizeof(v)) 1389 break;
1373 return -EINVAL; 1390 }
1374 if (get_user(v, (int __user *)optval)) 1391 if (get_user(val, (int __user *)optval)) {
1375 return -EFAULT; 1392 ret = -EFAULT;
1376 mrt->mroute_do_assert = v; 1393 break;
1377 return 0; 1394 }
1378 } 1395 mrt->mroute_do_assert = val;
1379#ifdef CONFIG_IP_PIMSM 1396 break;
1380 case MRT_PIM: 1397 case MRT_PIM:
1381 { 1398 if (!pimsm_enabled()) {
1382 int v; 1399 ret = -ENOPROTOOPT;
1383 1400 break;
1384 if (optlen != sizeof(v)) 1401 }
1385 return -EINVAL; 1402 if (optlen != sizeof(val)) {
1386 if (get_user(v, (int __user *)optval)) 1403 ret = -EINVAL;
1387 return -EFAULT; 1404 break;
1388 v = !!v; 1405 }
1406 if (get_user(val, (int __user *)optval)) {
1407 ret = -EFAULT;
1408 break;
1409 }
1389 1410
1390 rtnl_lock(); 1411 val = !!val;
1391 ret = 0; 1412 if (val != mrt->mroute_do_pim) {
1392 if (v != mrt->mroute_do_pim) { 1413 mrt->mroute_do_pim = val;
1393 mrt->mroute_do_pim = v; 1414 mrt->mroute_do_assert = val;
1394 mrt->mroute_do_assert = v;
1395 } 1415 }
1396 rtnl_unlock(); 1416 break;
1397 return ret;
1398 }
1399#endif
1400#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
1401 case MRT_TABLE: 1417 case MRT_TABLE:
1402 { 1418 if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
1403 u32 v; 1419 ret = -ENOPROTOOPT;
1404 1420 break;
1405 if (optlen != sizeof(u32)) 1421 }
1406 return -EINVAL; 1422 if (optlen != sizeof(uval)) {
1407 if (get_user(v, (u32 __user *)optval)) 1423 ret = -EINVAL;
1408 return -EFAULT; 1424 break;
1409 1425 }
1410 /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ 1426 if (get_user(uval, (u32 __user *)optval)) {
1411 if (v != RT_TABLE_DEFAULT && v >= 1000000000) 1427 ret = -EFAULT;
1412 return -EINVAL; 1428 break;
1429 }
1413 1430
1414 rtnl_lock();
1415 ret = 0;
1416 if (sk == rtnl_dereference(mrt->mroute_sk)) { 1431 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1417 ret = -EBUSY; 1432 ret = -EBUSY;
1418 } else { 1433 } else {
1419 if (!ipmr_new_table(net, v)) 1434 mrt = ipmr_new_table(net, uval);
1420 ret = -ENOMEM; 1435 if (IS_ERR(mrt))
1436 ret = PTR_ERR(mrt);
1421 else 1437 else
1422 raw_sk(sk)->ipmr_table = v; 1438 raw_sk(sk)->ipmr_table = uval;
1423 } 1439 }
1424 rtnl_unlock(); 1440 break;
1425 return ret; 1441 /* Spurious command, or MRT_VERSION which you cannot set. */
1426 }
1427#endif
1428 /*
1429 * Spurious command, or MRT_VERSION which you cannot
1430 * set.
1431 */
1432 default: 1442 default:
1433 return -ENOPROTOOPT; 1443 ret = -ENOPROTOOPT;
1434 } 1444 }
1445out_unlock:
1446 rtnl_unlock();
1447out:
1448 return ret;
1435} 1449}
1436 1450
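
The MRT_PIM and MRT_TABLE branches above drop their #ifdef CONFIG_IP_PIMSM /
CONFIG_IP_MROUTE_MULTIPLE_TABLES guards in favour of pimsm_enabled() and
IS_BUILTIN() tests that the compiler folds to constants, so both branches stay
visible to type checking. A minimal userspace sketch of the kernel's
kconfig.h machinery; the pimsm_enabled() body is an assumption, since the
diff context does not show its definition.

#include <stdio.h>

/* CONFIG_FOO=y arrives from Kconfig as "-DCONFIG_FOO=1"; these macros
 * (from include/linux/kconfig.h) turn "defined to 1" into a plain 0/1
 * C expression, so dead branches are type-checked, then eliminated. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)      ___is_defined(x)
#define ___is_defined(val)   ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg)  __take_second_arg(arg 1, 0)
#define IS_BUILTIN(option)   __is_defined(option)

/* Assumed helper: presumably what the patched ipmr.c tests for MRT_PIM */
static int pimsm_enabled(void)
{
        return IS_BUILTIN(CONFIG_IP_PIMSM_V1) ||
               IS_BUILTIN(CONFIG_IP_PIMSM_V2);
}

int main(void)
{
        /* prints 0 unless compiled with e.g. -DCONFIG_IP_PIMSM_V1=1 */
        printf("pimsm_enabled() = %d\n", pimsm_enabled());
        return 0;
}
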
1437/* 1451/* Getsock opt support for the multicast routing system. */
1438 * Getsock opt support for the multicast routing system.
1439 */
1440
1441int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) 1452int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1442{ 1453{
1443 int olr; 1454 int olr;
@@ -1453,39 +1464,35 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1453 if (!mrt) 1464 if (!mrt)
1454 return -ENOENT; 1465 return -ENOENT;
1455 1466
1456 if (optname != MRT_VERSION && 1467 switch (optname) {
1457#ifdef CONFIG_IP_PIMSM 1468 case MRT_VERSION:
1458 optname != MRT_PIM && 1469 val = 0x0305;
1459#endif 1470 break;
1460 optname != MRT_ASSERT) 1471 case MRT_PIM:
1472 if (!pimsm_enabled())
1473 return -ENOPROTOOPT;
1474 val = mrt->mroute_do_pim;
1475 break;
1476 case MRT_ASSERT:
1477 val = mrt->mroute_do_assert;
1478 break;
1479 default:
1461 return -ENOPROTOOPT; 1480 return -ENOPROTOOPT;
1481 }
1462 1482
1463 if (get_user(olr, optlen)) 1483 if (get_user(olr, optlen))
1464 return -EFAULT; 1484 return -EFAULT;
1465
1466 olr = min_t(unsigned int, olr, sizeof(int)); 1485 olr = min_t(unsigned int, olr, sizeof(int));
1467 if (olr < 0) 1486 if (olr < 0)
1468 return -EINVAL; 1487 return -EINVAL;
1469
1470 if (put_user(olr, optlen)) 1488 if (put_user(olr, optlen))
1471 return -EFAULT; 1489 return -EFAULT;
1472 if (optname == MRT_VERSION)
1473 val = 0x0305;
1474#ifdef CONFIG_IP_PIMSM
1475 else if (optname == MRT_PIM)
1476 val = mrt->mroute_do_pim;
1477#endif
1478 else
1479 val = mrt->mroute_do_assert;
1480 if (copy_to_user(optval, &val, olr)) 1490 if (copy_to_user(optval, &val, olr))
1481 return -EFAULT; 1491 return -EFAULT;
1482 return 0; 1492 return 0;
1483} 1493}
1484 1494
1485/* 1495/* The IP multicast ioctl support routines. */
1486 * The IP multicast ioctl support routines.
1487 */
1488
1489int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) 1496int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1490{ 1497{
1491 struct sioc_sg_req sr; 1498 struct sioc_sg_req sr;
@@ -1618,7 +1625,6 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1618} 1625}
1619#endif 1626#endif
1620 1627
1621
1622static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) 1628static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1623{ 1629{
1624 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1630 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -1640,17 +1646,14 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1640 return NOTIFY_DONE; 1646 return NOTIFY_DONE;
1641} 1647}
1642 1648
1643
1644static struct notifier_block ip_mr_notifier = { 1649static struct notifier_block ip_mr_notifier = {
1645 .notifier_call = ipmr_device_event, 1650 .notifier_call = ipmr_device_event,
1646}; 1651};
1647 1652
1648/* 1653/* Encapsulate a packet by attaching a valid IPIP header to it.
1649 * Encapsulate a packet by attaching a valid IPIP header to it. 1654 * This avoids tunnel drivers and other mess and gives us the speed so
1650 * This avoids tunnel drivers and other mess and gives us the speed so 1655 * important for multicast video.
1651 * important for multicast video.
1652 */ 1656 */
1653
1654static void ip_encap(struct net *net, struct sk_buff *skb, 1657static void ip_encap(struct net *net, struct sk_buff *skb,
1655 __be32 saddr, __be32 daddr) 1658 __be32 saddr, __be32 daddr)
1656{ 1659{
@@ -1692,9 +1695,7 @@ static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1692 return dst_output(net, sk, skb); 1695 return dst_output(net, sk, skb);
1693} 1696}
1694 1697
1695/* 1698/* Processing handlers for ipmr_forward */
1696 * Processing handlers for ipmr_forward
1697 */
1698 1699
1699static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, 1700static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1700 struct sk_buff *skb, struct mfc_cache *c, int vifi) 1701 struct sk_buff *skb, struct mfc_cache *c, int vifi)
@@ -1709,7 +1710,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1709 if (!vif->dev) 1710 if (!vif->dev)
1710 goto out_free; 1711 goto out_free;
1711 1712
1712#ifdef CONFIG_IP_PIMSM
1713 if (vif->flags & VIFF_REGISTER) { 1713 if (vif->flags & VIFF_REGISTER) {
1714 vif->pkt_out++; 1714 vif->pkt_out++;
1715 vif->bytes_out += skb->len; 1715 vif->bytes_out += skb->len;
@@ -1718,7 +1718,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1718 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); 1718 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1719 goto out_free; 1719 goto out_free;
1720 } 1720 }
1721#endif
1722 1721
1723 if (vif->flags & VIFF_TUNNEL) { 1722 if (vif->flags & VIFF_TUNNEL) {
1724 rt = ip_route_output_ports(net, &fl4, NULL, 1723 rt = ip_route_output_ports(net, &fl4, NULL,
@@ -1745,7 +1744,6 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1745 * allow to send ICMP, so that packets will disappear 1744 * allow to send ICMP, so that packets will disappear
1746 * to blackhole. 1745 * to blackhole.
1747 */ 1746 */
1748
1749 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 1747 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1750 ip_rt_put(rt); 1748 ip_rt_put(rt);
1751 goto out_free; 1749 goto out_free;
@@ -1777,8 +1775,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1777 1775
1778 IPCB(skb)->flags |= IPSKB_FORWARDED; 1776 IPCB(skb)->flags |= IPSKB_FORWARDED;
1779 1777
1780 /* 1778 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1781 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1782 * not only before forwarding, but after forwarding on all output 1779 * not only before forwarding, but after forwarding on all output
1783 * interfaces. It is clear, if mrouter runs a multicasting 1780 * interfaces. It is clear, if mrouter runs a multicasting
1784 * program, it should receive packets not depending to what interface 1781 * program, it should receive packets not depending to what interface
@@ -1809,7 +1806,6 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1809} 1806}
1810 1807
1811/* "local" means that we should preserve one skb (for local delivery) */ 1808/* "local" means that we should preserve one skb (for local delivery) */
1812
1813static void ip_mr_forward(struct net *net, struct mr_table *mrt, 1809static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1814 struct sk_buff *skb, struct mfc_cache *cache, 1810 struct sk_buff *skb, struct mfc_cache *cache,
1815 int local) 1811 int local)
@@ -1834,9 +1830,7 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1834 goto forward; 1830 goto forward;
1835 } 1831 }
1836 1832
1837 /* 1833 /* Wrong interface: drop packet and (maybe) send PIM assert. */
1838 * Wrong interface: drop packet and (maybe) send PIM assert.
1839 */
1840 if (mrt->vif_table[vif].dev != skb->dev) { 1834 if (mrt->vif_table[vif].dev != skb->dev) {
1841 if (rt_is_output_route(skb_rtable(skb))) { 1835 if (rt_is_output_route(skb_rtable(skb))) {
1842 /* It is our own packet, looped back. 1836 /* It is our own packet, looped back.
@@ -1875,9 +1869,7 @@ forward:
1875 mrt->vif_table[vif].pkt_in++; 1869 mrt->vif_table[vif].pkt_in++;
1876 mrt->vif_table[vif].bytes_in += skb->len; 1870 mrt->vif_table[vif].bytes_in += skb->len;
1877 1871
1878 /* 1872 /* Forward the frame */
1879 * Forward the frame
1880 */
1881 if (cache->mfc_origin == htonl(INADDR_ANY) && 1873 if (cache->mfc_origin == htonl(INADDR_ANY) &&
1882 cache->mfc_mcastgrp == htonl(INADDR_ANY)) { 1874 cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1883 if (true_vifi >= 0 && 1875 if (true_vifi >= 0 &&
@@ -1951,11 +1943,9 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1951 return mrt; 1943 return mrt;
1952} 1944}
1953 1945
1954/* 1946/* Multicast packets for forwarding arrive here
1955 * Multicast packets for forwarding arrive here 1947 * Called with rcu_read_lock();
1956 * Called with rcu_read_lock();
1957 */ 1948 */
1958
1959int ip_mr_input(struct sk_buff *skb) 1949int ip_mr_input(struct sk_buff *skb)
1960{ 1950{
1961 struct mfc_cache *cache; 1951 struct mfc_cache *cache;
@@ -2006,9 +1996,7 @@ int ip_mr_input(struct sk_buff *skb)
2006 vif); 1996 vif);
2007 } 1997 }
2008 1998
2009 /* 1999 /* No usable cache entry */
2010 * No usable cache entry
2011 */
2012 if (!cache) { 2000 if (!cache) {
2013 int vif; 2001 int vif;
2014 2002
@@ -2049,53 +2037,8 @@ dont_forward:
2049 return 0; 2037 return 0;
2050} 2038}
2051 2039
2052#ifdef CONFIG_IP_PIMSM
2053/* called with rcu_read_lock() */
2054static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
2055 unsigned int pimlen)
2056{
2057 struct net_device *reg_dev = NULL;
2058 struct iphdr *encap;
2059
2060 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
2061 /*
2062 * Check that:
2063 * a. packet is really sent to a multicast group
2064 * b. packet is not a NULL-REGISTER
2065 * c. packet is not truncated
2066 */
2067 if (!ipv4_is_multicast(encap->daddr) ||
2068 encap->tot_len == 0 ||
2069 ntohs(encap->tot_len) + pimlen > skb->len)
2070 return 1;
2071
2072 read_lock(&mrt_lock);
2073 if (mrt->mroute_reg_vif_num >= 0)
2074 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
2075 read_unlock(&mrt_lock);
2076
2077 if (!reg_dev)
2078 return 1;
2079
2080 skb->mac_header = skb->network_header;
2081 skb_pull(skb, (u8 *)encap - skb->data);
2082 skb_reset_network_header(skb);
2083 skb->protocol = htons(ETH_P_IP);
2084 skb->ip_summed = CHECKSUM_NONE;
2085
2086 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
2087
2088 netif_rx(skb);
2089
2090 return NET_RX_SUCCESS;
2091}
2092#endif
2093
2094#ifdef CONFIG_IP_PIMSM_V1 2040#ifdef CONFIG_IP_PIMSM_V1
2095/* 2041/* Handle IGMP messages of PIMv1 */
2096 * Handle IGMP messages of PIMv1
2097 */
2098
2099int pim_rcv_v1(struct sk_buff *skb) 2042int pim_rcv_v1(struct sk_buff *skb)
2100{ 2043{
2101 struct igmphdr *pim; 2044 struct igmphdr *pim;
@@ -2420,9 +2363,8 @@ done:
2420} 2363}
2421 2364
2422#ifdef CONFIG_PROC_FS 2365#ifdef CONFIG_PROC_FS
2423/* 2366/* The /proc interfaces to multicast routing :
2424 * The /proc interfaces to multicast routing : 2367 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2425 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2426 */ 2368 */
2427struct ipmr_vif_iter { 2369struct ipmr_vif_iter {
2428 struct seq_net_private p; 2370 struct seq_net_private p;
@@ -2706,10 +2648,7 @@ static const struct net_protocol pim_protocol = {
2706}; 2648};
2707#endif 2649#endif
2708 2650
2709 2651/* Setup for IP multicast routing */
2710/*
2711 * Setup for IP multicast routing
2712 */
2713static int __net_init ipmr_net_init(struct net *net) 2652static int __net_init ipmr_net_init(struct net *net)
2714{ 2653{
2715 int err; 2654 int err;
@@ -2759,8 +2698,6 @@ int __init ip_mr_init(void)
2759 sizeof(struct mfc_cache), 2698 sizeof(struct mfc_cache),
2760 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 2699 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2761 NULL); 2700 NULL);
2762 if (!mrt_cachep)
2763 return -ENOMEM;
2764 2701
2765 err = register_pernet_subsys(&ipmr_net_ops); 2702 err = register_pernet_subsys(&ipmr_net_ops);
2766 if (err) 2703 if (err)
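
For context on the setsockopt error paths being rearranged in this file, this
is what the other end of the API looks like. A hedged userspace sketch
(assumes Linux with CONFIG_IP_MROUTE and CAP_NET_ADMIN); it merely exercises
ip_mroute_setsockopt() and is not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>

int main(void)
{
        int one = 1;
        struct vifctl vif;
        int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

        if (s < 0) {
                perror("socket (needs CAP_NET_ADMIN)");
                return 1;
        }
        /* MRT_INIT claims this socket as the mroute daemon socket;
         * a second claim fails with EADDRINUSE, per the hunk above */
        if (setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
                perror("MRT_INIT");

        memset(&vif, 0, sizeof(vif));
        vif.vifc_vifi = 0;        /* must be < MAXVIFS, else ENFILE */
        vif.vifc_threshold = 1;   /* vifc_lcl_addr left unset here */
        if (setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vif, sizeof(vif)) < 0)
                perror("MRT_ADD_VIF");

        setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);  /* tear down */
        close(s);
        return 0;
}
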
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 11dccba474b7..b488cac9c5ca 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -38,13 +38,13 @@ MODULE_DESCRIPTION("arptables core");
38/*#define DEBUG_ARP_TABLES_USER*/ 38/*#define DEBUG_ARP_TABLES_USER*/
39 39
40#ifdef DEBUG_ARP_TABLES 40#ifdef DEBUG_ARP_TABLES
41#define dprintf(format, args...) printk(format , ## args) 41#define dprintf(format, args...) pr_debug(format, ## args)
42#else 42#else
43#define dprintf(format, args...) 43#define dprintf(format, args...)
44#endif 44#endif
45 45
46#ifdef DEBUG_ARP_TABLES_USER 46#ifdef DEBUG_ARP_TABLES_USER
47#define duprintf(format, args...) printk(format , ## args) 47#define duprintf(format, args...) pr_debug(format, ## args)
48#else 48#else
49#define duprintf(format, args...) 49#define duprintf(format, args...)
50#endif 50#endif
@@ -1905,7 +1905,7 @@ static int __init arp_tables_init(void)
1905 if (ret < 0) 1905 if (ret < 0)
1906 goto err4; 1906 goto err4;
1907 1907
1908 printk(KERN_INFO "arp_tables: (C) 2002 David S. Miller\n"); 1908 pr_info("arp_tables: (C) 2002 David S. Miller\n");
1909 return 0; 1909 return 0;
1910 1910
1911err4: 1911err4:
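
The dprintf/duprintf hunks above swap raw printk() for pr_debug()/pr_info().
A small userspace model of the convention; the pr_fmt prefix and the -DDEBUG
toggle are assumptions standing in for the kernel's dynamic debug support.

#include <stdio.h>

#define pr_fmt(fmt) "arp_tables: " fmt  /* prefix applied once, centrally */

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#ifdef DEBUG
#define pr_debug(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#else
/* in-kernel this is no_printk(): compiled out but still type-checked */
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
        pr_info("(C) 2002 David S. Miller\n");
        pr_debug("entry %d failed verification\n", 3); /* silent w/o -DDEBUG */
        return 0;
}
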
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 461ca926fd39..e3c46e8e2762 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -451,7 +451,7 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
451 451
452 ret = nf_register_sockopt(&so_getorigdst); 452 ret = nf_register_sockopt(&so_getorigdst);
453 if (ret < 0) { 453 if (ret < 0) {
454 printk(KERN_ERR "Unable to register netfilter socket option\n"); 454 pr_err("Unable to register netfilter socket option\n");
455 return ret; 455 return ret;
456 } 456 }
457 457
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 657d2307f031..b3ca21b2ba9b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
45 struct net *net = nf_ct_net(ct); 45 struct net *net = nf_ct_net(ct);
46 const struct nf_conn *master = ct->master; 46 const struct nf_conn *master = ct->master;
47 struct nf_conntrack_expect *other_exp; 47 struct nf_conntrack_expect *other_exp;
48 struct nf_conntrack_tuple t; 48 struct nf_conntrack_tuple t = {};
49 const struct nf_ct_pptp_master *ct_pptp_info; 49 const struct nf_ct_pptp_master *ct_pptp_info;
50 const struct nf_nat_pptp *nat_pptp_info; 50 const struct nf_nat_pptp *nat_pptp_info;
51 struct nf_nat_range range; 51 struct nf_nat_range range;
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ddb894ac1458..c9b52c361da2 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1048,7 +1048,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1048 if (!asn1_uint_decode (&ctx, end, &vers)) 1048 if (!asn1_uint_decode (&ctx, end, &vers))
1049 return 0; 1049 return 0;
1050 if (debug > 1) 1050 if (debug > 1)
1051 printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); 1051 pr_debug("bsalg: snmp version: %u\n", vers + 1);
1052 if (vers > 1) 1052 if (vers > 1)
1053 return 1; 1053 return 1;
1054 1054
@@ -1064,10 +1064,10 @@ static int snmp_parse_mangle(unsigned char *msg,
1064 if (debug > 1) { 1064 if (debug > 1) {
1065 unsigned int i; 1065 unsigned int i;
1066 1066
1067 printk(KERN_DEBUG "bsalg: community: "); 1067 pr_debug("bsalg: community: ");
1068 for (i = 0; i < comm.len; i++) 1068 for (i = 0; i < comm.len; i++)
1069 printk("%c", comm.data[i]); 1069 pr_cont("%c", comm.data[i]);
1070 printk("\n"); 1070 pr_cont("\n");
1071 } 1071 }
1072 kfree(comm.data); 1072 kfree(comm.data);
1073 1073
@@ -1091,9 +1091,9 @@ static int snmp_parse_mangle(unsigned char *msg,
1091 }; 1091 };
1092 1092
1093 if (pdutype > SNMP_PDU_TRAP2) 1093 if (pdutype > SNMP_PDU_TRAP2)
1094 printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); 1094 pr_debug("bsalg: bad pdu type %u\n", pdutype);
1095 else 1095 else
1096 printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]); 1096 pr_debug("bsalg: pdu: %s\n", pdus[pdutype]);
1097 } 1097 }
1098 if (pdutype != SNMP_PDU_RESPONSE && 1098 if (pdutype != SNMP_PDU_RESPONSE &&
1099 pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) 1099 pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2)
@@ -1119,7 +1119,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1119 return 0; 1119 return 0;
1120 1120
1121 if (debug > 1) 1121 if (debug > 1)
1122 printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " 1122 pr_debug("bsalg: request: id=0x%lx error_status=%u "
1123 "error_index=%u\n", req.id, req.error_status, 1123 "error_index=%u\n", req.id, req.error_status,
1124 req.error_index); 1124 req.error_index);
1125 } 1125 }
@@ -1145,13 +1145,13 @@ static int snmp_parse_mangle(unsigned char *msg,
1145 } 1145 }
1146 1146
1147 if (debug > 1) { 1147 if (debug > 1) {
1148 printk(KERN_DEBUG "bsalg: object: "); 1148 pr_debug("bsalg: object: ");
1149 for (i = 0; i < obj->id_len; i++) { 1149 for (i = 0; i < obj->id_len; i++) {
1150 if (i > 0) 1150 if (i > 0)
1151 printk("."); 1151 pr_cont(".");
1152 printk("%lu", obj->id[i]); 1152 pr_cont("%lu", obj->id[i]);
1153 } 1153 }
1154 printk(": type=%u\n", obj->type); 1154 pr_cont(": type=%u\n", obj->type);
1155 1155
1156 } 1156 }
1157 1157
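
Note the printk("%c", ...) continuations becoming pr_cont(): without
KERN_CONT, each printk() opens a new log record, which would split the
piecewise community/OID dumps above into one fragment per line. A userspace
model of the pattern, with fprintf standing in for the kernel log.

#include <stdio.h>

#define pr_debug(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#define pr_cont(fmt, ...)  fprintf(stderr, fmt, ##__VA_ARGS__)

static void dump_oid(const unsigned long *id, unsigned int len)
{
        unsigned int i;

        pr_debug("bsalg: object: ");    /* opens the log line */
        for (i = 0; i < len; i++) {
                if (i > 0)
                        pr_cont(".");   /* continues it, no new record */
                pr_cont("%lu", id[i]);
        }
        pr_cont(": type=%u\n", 6u);
}

int main(void)
{
        unsigned long oid[] = { 1, 3, 6, 1, 2, 1 };

        dump_oid(oid, 6);  /* -> "bsalg: object: 1.3.6.1.2.1: type=6" */
        return 0;
}
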
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8c0d0bdc2a7c..63e5be0abd86 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
406 ip_select_ident(net, skb, NULL); 406 ip_select_ident(net, skb, NULL);
407 407
408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
409 skb->transport_header += iphlen;
410 if (iph->protocol == IPPROTO_ICMP &&
411 length >= iphlen + sizeof(struct icmphdr))
412 icmp_out_count(net, ((struct icmphdr *)
413 skb_transport_header(skb))->type);
409 } 414 }
410 if (iph->protocol == IPPROTO_ICMP)
411 icmp_out_count(net, ((struct icmphdr *)
412 skb_transport_header(skb))->type);
413 415
414 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 416 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
415 net, sk, skb, NULL, rt->dst.dev, 417 net, sk, skb, NULL, rt->dst.dev,
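
The raw.c hunk stops icmp_out_count() from reading an ICMP type byte the
IP_HDRINCL caller never supplied: the counter is bumped only when the
user-built packet is long enough to hold an icmphdr past the IP header. A
plain-C model of that check; buf/len stand in for the skb and the struct is a
stand-in for linux/icmp.h.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct icmp_hdr {               /* stand-in for struct icmphdr */
        uint8_t type, code;
        uint16_t checksum;
};

/* Mirrors the new test: only touch the ICMP type when the caller's
 * buffer really contains a full header after the IP header. */
static int icmp_type_if_complete(const uint8_t *buf, size_t len,
                                 size_t iphlen)
{
        if (len < iphlen + sizeof(struct icmp_hdr))
                return -1;      /* truncated: do not count it */
        return ((const struct icmp_hdr *)(buf + iphlen))->type;
}

int main(void)
{
        uint8_t pkt[20 + 8] = { 0 };

        pkt[20] = 8;            /* ICMP_ECHO after a 20-byte IP header */
        printf("%d %d\n",
               icmp_type_if_complete(pkt, sizeof(pkt), 20),  /* 8 */
               icmp_type_if_complete(pkt, 21, 20));          /* -1 */
        return 0;
}
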
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0c1e80..c1728771cf89 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
451 unsigned int mask; 451 unsigned int mask;
452 struct sock *sk = sock->sk; 452 struct sock *sk = sock->sk;
453 const struct tcp_sock *tp = tcp_sk(sk); 453 const struct tcp_sock *tp = tcp_sk(sk);
454 int state;
454 455
455 sock_rps_record_flow(sk); 456 sock_rps_record_flow(sk);
456 457
457 sock_poll_wait(file, sk_sleep(sk), wait); 458 sock_poll_wait(file, sk_sleep(sk), wait);
458 if (sk->sk_state == TCP_LISTEN) 459
460 state = sk_state_load(sk);
461 if (state == TCP_LISTEN)
459 return inet_csk_listen_poll(sk); 462 return inet_csk_listen_poll(sk);
460 463
461 /* Socket is not locked. We are protected from async events 464 /* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
492 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 495 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
493 * blocking on fresh not-connected or disconnected socket. --ANK 496 * blocking on fresh not-connected or disconnected socket. --ANK
494 */ 497 */
495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 498 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
496 mask |= POLLHUP; 499 mask |= POLLHUP;
497 if (sk->sk_shutdown & RCV_SHUTDOWN) 500 if (sk->sk_shutdown & RCV_SHUTDOWN)
498 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 501 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
499 502
500 /* Connected or passive Fast Open socket? */ 503 /* Connected or passive Fast Open socket? */
501 if (sk->sk_state != TCP_SYN_SENT && 504 if (state != TCP_SYN_SENT &&
502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { 505 (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
503 int target = sock_rcvlowat(sk, 0, INT_MAX); 506 int target = sock_rcvlowat(sk, 0, INT_MAX);
504 507
505 if (tp->urg_seq == tp->copied_seq && 508 if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
507 tp->urg_data) 510 tp->urg_data)
508 target++; 511 target++;
509 512
510 /* Potential race condition. If read of tp below will
511 * escape above sk->sk_state, we can be illegally awaken
512 * in SYN_* states. */
513 if (tp->rcv_nxt - tp->copied_seq >= target) 513 if (tp->rcv_nxt - tp->copied_seq >= target)
514 mask |= POLLIN | POLLRDNORM; 514 mask |= POLLIN | POLLRDNORM;
515 515
@@ -1934,7 +1934,7 @@ void tcp_set_state(struct sock *sk, int state)
1934 /* Change state AFTER socket is unhashed to avoid closed 1934 /* Change state AFTER socket is unhashed to avoid closed
1935 * socket sitting in hash tables. 1935 * socket sitting in hash tables.
1936 */ 1936 */
1937 sk->sk_state = state; 1937 sk_state_store(sk, state);
1938 1938
1939#ifdef STATE_TRACE 1939#ifdef STATE_TRACE
1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2644,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2644 if (sk->sk_type != SOCK_STREAM) 2644 if (sk->sk_type != SOCK_STREAM)
2645 return; 2645 return;
2646 2646
2647 info->tcpi_state = sk->sk_state; 2647 info->tcpi_state = sk_state_load(sk);
2648
2648 info->tcpi_ca_state = icsk->icsk_ca_state; 2649 info->tcpi_ca_state = icsk->icsk_ca_state;
2649 info->tcpi_retransmits = icsk->icsk_retransmits; 2650 info->tcpi_retransmits = icsk->icsk_retransmits;
2650 info->tcpi_probes = icsk->icsk_probes_out; 2651 info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2673,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2672 info->tcpi_snd_mss = tp->mss_cache; 2673 info->tcpi_snd_mss = tp->mss_cache;
2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2674 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2674 2675
2675 if (sk->sk_state == TCP_LISTEN) { 2676 if (info->tcpi_state == TCP_LISTEN) {
2676 info->tcpi_unacked = sk->sk_ack_backlog; 2677 info->tcpi_unacked = sk->sk_ack_backlog;
2677 info->tcpi_sacked = sk->sk_max_ack_backlog; 2678 info->tcpi_sacked = sk->sk_max_ack_backlog;
2678 } else { 2679 } else {
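
sk_state_load()/sk_state_store() used throughout this hunk wrap
smp_load_acquire()/smp_store_release(), so lockless readers like tcp_poll()
see a coherent state and, just as importantly, sample it once into a local
instead of re-reading sk->sk_state mid-function. A C11 model of the pairing;
the state constants match include/net/tcp_states.h.

#include <stdatomic.h>
#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

static _Atomic int sk_state = TCP_CLOSE;

static inline void sk_state_store(int newstate)
{
        /* writer publishes with release semantics */
        atomic_store_explicit(&sk_state, newstate, memory_order_release);
}

static inline int sk_state_load(void)
{
        /* lockless reader observes with acquire semantics */
        return atomic_load_explicit(&sk_state, memory_order_acquire);
}

int main(void)
{
        sk_state_store(TCP_LISTEN);

        int state = sk_state_load();    /* one snapshot, reused below */
        if (state == TCP_LISTEN)
                puts("poll: listening socket");
        else if (state == TCP_CLOSE)
                puts("poll: closed socket");
        return 0;
}
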
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f34946177..b31604086edd 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
21{ 21{
22 struct tcp_info *info = _info; 22 struct tcp_info *info = _info;
23 23
24 if (sk->sk_state == TCP_LISTEN) { 24 if (sk_state_load(sk) == TCP_LISTEN) {
25 r->idiag_rqueue = sk->sk_ack_backlog; 25 r->idiag_rqueue = sk->sk_ack_backlog;
26 r->idiag_wqueue = sk->sk_max_ack_backlog; 26 r->idiag_wqueue = sk->sk_max_ack_backlog;
27 } else if (sk->sk_type == SOCK_STREAM) { 27 } else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0cdf2..ba09016d1bfd 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2158 __u16 destp = ntohs(inet->inet_dport); 2158 __u16 destp = ntohs(inet->inet_dport);
2159 __u16 srcp = ntohs(inet->inet_sport); 2159 __u16 srcp = ntohs(inet->inet_sport);
2160 int rx_queue; 2160 int rx_queue;
2161 int state;
2161 2162
2162 if (icsk->icsk_pending == ICSK_TIME_RETRANS || 2163 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2163 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 2164 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2175 timer_expires = jiffies; 2176 timer_expires = jiffies;
2176 } 2177 }
2177 2178
2178 if (sk->sk_state == TCP_LISTEN) 2179 state = sk_state_load(sk);
2180 if (state == TCP_LISTEN)
2179 rx_queue = sk->sk_ack_backlog; 2181 rx_queue = sk->sk_ack_backlog;
2180 else 2182 else
2181 /* 2183 /* Because we don't lock the socket,
2182 * because we dont lock socket, we might find a transient negative value 2184 * we might find a transient negative value.
2183 */ 2185 */
2184 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 2186 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2185 2187
2186 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2188 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2187 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", 2189 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2188 i, src, srcp, dest, destp, sk->sk_state, 2190 i, src, srcp, dest, destp, state,
2189 tp->write_seq - tp->snd_una, 2191 tp->write_seq - tp->snd_una,
2190 rx_queue, 2192 rx_queue,
2191 timer_active, 2193 timer_active,
@@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2199 jiffies_to_clock_t(icsk->icsk_ack.ato), 2201 jiffies_to_clock_t(icsk->icsk_ack.ato),
2200 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2202 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2201 tp->snd_cwnd, 2203 tp->snd_cwnd,
2202 sk->sk_state == TCP_LISTEN ? 2204 state == TCP_LISTEN ?
2203 (fastopenq ? fastopenq->max_qlen : 0) : 2205 fastopenq->max_qlen :
2204 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); 2206 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2205} 2207}
2206 2208
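
The seq_printf() above is the producer side of /proc/net/tcp. For a sense of
the format, a small reader that assumes the field layout printed by
get_tcp4_sock(); 0x0A is TCP_LISTEN.

#include <stdio.h>

int main(void)
{
        char line[512], local[64], remote[64];
        unsigned int sl, state;
        FILE *f = fopen("/proc/net/tcp", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fgets(line, sizeof(line), f);   /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
                /* "%4d: %08X:%04X %08X:%04X %02X ..." per the code above */
                if (sscanf(line, "%u: %63s %63s %x",
                           &sl, local, remote, &state) == 4)
                        printf("%s -> %s state=%#x%s\n", local, remote,
                               state, state == 0x0a ? " (LISTEN)" : "");
        }
        fclose(f);
        return 0;
}
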
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 124338a39e29..5ee56d0a8699 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1651,7 +1651,6 @@ out:
1651 if (!err) { 1651 if (!err) {
1652 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); 1652 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1653 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 1653 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1654 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
1655 } else { 1654 } else {
1656 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 1655 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1657 } 1656 }
@@ -2015,7 +2014,6 @@ out:
2015 if (!err) { 2014 if (!err) {
2016 ICMP6MSGOUT_INC_STATS(net, idev, type); 2015 ICMP6MSGOUT_INC_STATS(net, idev, type);
2017 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 2016 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2018 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
2019 } else 2017 } else
2020 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 2018 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2021 2019
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c8bc9b4ac328..89758be9c6a6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -62,6 +62,7 @@
62#include <net/lwtunnel.h> 62#include <net/lwtunnel.h>
63#include <net/ip_tunnels.h> 63#include <net/ip_tunnels.h>
64#include <net/l3mdev.h> 64#include <net/l3mdev.h>
65#include <trace/events/fib6.h>
65 66
66#include <asm/uaccess.h> 67#include <asm/uaccess.h>
67 68
@@ -404,6 +405,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
404 } 405 }
405} 406}
406 407
408static bool __rt6_check_expired(const struct rt6_info *rt)
409{
410 if (rt->rt6i_flags & RTF_EXPIRES)
411 return time_after(jiffies, rt->dst.expires);
412 else
413 return false;
414}
415
407static bool rt6_check_expired(const struct rt6_info *rt) 416static bool rt6_check_expired(const struct rt6_info *rt)
408{ 417{
409 if (rt->rt6i_flags & RTF_EXPIRES) { 418 if (rt->rt6i_flags & RTF_EXPIRES) {
@@ -857,6 +866,9 @@ restart:
857 } 866 }
858 dst_use(&rt->dst, jiffies); 867 dst_use(&rt->dst, jiffies);
859 read_unlock_bh(&table->tb6_lock); 868 read_unlock_bh(&table->tb6_lock);
869
870 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
871
860 return rt; 872 return rt;
861 873
862} 874}
@@ -1070,6 +1082,8 @@ redo_rt6_select:
1070 read_unlock_bh(&table->tb6_lock); 1082 read_unlock_bh(&table->tb6_lock);
1071 1083
1072 rt6_dst_from_metrics_check(rt); 1084 rt6_dst_from_metrics_check(rt);
1085
1086 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1073 return rt; 1087 return rt;
1074 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) && 1088 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1075 !(rt->rt6i_flags & RTF_GATEWAY))) { 1089 !(rt->rt6i_flags & RTF_GATEWAY))) {
@@ -1093,6 +1107,8 @@ redo_rt6_select:
1093 uncached_rt = net->ipv6.ip6_null_entry; 1107 uncached_rt = net->ipv6.ip6_null_entry;
1094 1108
1095 dst_hold(&uncached_rt->dst); 1109 dst_hold(&uncached_rt->dst);
1110
1111 trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
1096 return uncached_rt; 1112 return uncached_rt;
1097 1113
1098 } else { 1114 } else {
@@ -1117,6 +1133,7 @@ redo_rt6_select:
1117 dst_release(&rt->dst); 1133 dst_release(&rt->dst);
1118 } 1134 }
1119 1135
1136 trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
1120 return pcpu_rt; 1137 return pcpu_rt;
1121 1138
1122 } 1139 }
@@ -1252,7 +1269,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1252 1269
1253static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) 1270static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1254{ 1271{
1255 if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && 1272 if (!__rt6_check_expired(rt) &&
1273 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1256 rt6_check((struct rt6_info *)(rt->dst.from), cookie)) 1274 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1257 return &rt->dst; 1275 return &rt->dst;
1258 else 1276 else
@@ -1272,7 +1290,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1272 1290
1273 rt6_dst_from_metrics_check(rt); 1291 rt6_dst_from_metrics_check(rt);
1274 1292
1275 if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE)) 1293 if (rt->rt6i_flags & RTF_PCPU ||
1294 (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
1276 return rt6_dst_from_check(rt, cookie); 1295 return rt6_dst_from_check(rt, cookie);
1277 else 1296 else
1278 return rt6_check(rt, cookie); 1297 return rt6_check(rt, cookie);
@@ -1322,6 +1341,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1322 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); 1341 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
1323} 1342}
1324 1343
1344static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1345{
1346 return !(rt->rt6i_flags & RTF_CACHE) &&
1347 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
1348}
1349
1325static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 1350static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1326 const struct ipv6hdr *iph, u32 mtu) 1351 const struct ipv6hdr *iph, u32 mtu)
1327{ 1352{
@@ -1335,7 +1360,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1335 if (mtu >= dst_mtu(dst)) 1360 if (mtu >= dst_mtu(dst))
1336 return; 1361 return;
1337 1362
1338 if (rt6->rt6i_flags & RTF_CACHE) { 1363 if (!rt6_cache_allowed_for_pmtu(rt6)) {
1339 rt6_do_update_pmtu(rt6, mtu); 1364 rt6_do_update_pmtu(rt6, mtu);
1340 } else { 1365 } else {
1341 const struct in6_addr *daddr, *saddr; 1366 const struct in6_addr *daddr, *saddr;
@@ -1458,6 +1483,7 @@ out:
1458 1483
1459 read_unlock_bh(&table->tb6_lock); 1484 read_unlock_bh(&table->tb6_lock);
1460 1485
1486 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1461 return rt; 1487 return rt;
1462}; 1488};
1463 1489
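
__rt6_check_expired() added above only trusts dst.expires on routes that
actually carry RTF_EXPIRES, and the comparison must be the wrap-safe
time_after() because jiffies overflows. A userspace model with a 32-bit tick
counter; the flag value is taken from include/uapi/linux/ipv6_route.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RTF_EXPIRES 0x00400000u

/* time_after(a, b): true if a is later than b, wraparound-safe */
#define time_after(a, b) ((int32_t)((b) - (a)) < 0)

struct rt6_model {
        uint32_t flags;
        uint32_t expires;       /* expiry tick, valid iff RTF_EXPIRES */
};

static bool rt6_check_expired(const struct rt6_model *rt, uint32_t now)
{
        if (rt->flags & RTF_EXPIRES)
                return time_after(now, rt->expires);
        return false;           /* no expiry armed: never stale */
}

int main(void)
{
        struct rt6_model rt = { .flags = RTF_EXPIRES, .expires = 100 };

        printf("%d %d\n",
               rt6_check_expired(&rt, 99),    /* 0: not yet */
               rt6_check_expired(&rt, 101));  /* 1: expired */
        return 0;
}
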
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e754e41..c5429a636f1a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1690,6 +1690,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1690 const struct tcp_sock *tp = tcp_sk(sp); 1690 const struct tcp_sock *tp = tcp_sk(sp);
1691 const struct inet_connection_sock *icsk = inet_csk(sp); 1691 const struct inet_connection_sock *icsk = inet_csk(sp);
1692 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; 1692 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1693 int rx_queue;
1694 int state;
1693 1695
1694 dest = &sp->sk_v6_daddr; 1696 dest = &sp->sk_v6_daddr;
1695 src = &sp->sk_v6_rcv_saddr; 1697 src = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1712,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1710 timer_expires = jiffies; 1712 timer_expires = jiffies;
1711 } 1713 }
1712 1714
1715 state = sk_state_load(sp);
1716 if (state == TCP_LISTEN)
1717 rx_queue = sp->sk_ack_backlog;
1718 else
1719 /* Because we don't lock the socket,
1720 * we might find a transient negative value.
1721 */
1722 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1723
1713 seq_printf(seq, 1724 seq_printf(seq,
1714 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1725 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1715 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", 1726 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1718 src->s6_addr32[2], src->s6_addr32[3], srcp, 1729 src->s6_addr32[2], src->s6_addr32[3], srcp,
1719 dest->s6_addr32[0], dest->s6_addr32[1], 1730 dest->s6_addr32[0], dest->s6_addr32[1],
1720 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1731 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1721 sp->sk_state, 1732 state,
1722 tp->write_seq-tp->snd_una, 1733 tp->write_seq - tp->snd_una,
1723 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1734 rx_queue,
1724 timer_active, 1735 timer_active,
1725 jiffies_delta_to_clock_t(timer_expires - jiffies), 1736 jiffies_delta_to_clock_t(timer_expires - jiffies),
1726 icsk->icsk_retransmits, 1737 icsk->icsk_retransmits,
@@ -1732,7 +1743,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1732 jiffies_to_clock_t(icsk->icsk_ack.ato), 1743 jiffies_to_clock_t(icsk->icsk_ack.ato),
1733 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 1744 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1734 tp->snd_cwnd, 1745 tp->snd_cwnd,
1735 sp->sk_state == TCP_LISTEN ? 1746 state == TCP_LISTEN ?
1736 fastopenq->max_qlen : 1747 fastopenq->max_qlen :
1737 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) 1748 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1738 ); 1749 );
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 42e96729dae6..446e1300383e 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -217,8 +217,7 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
217 break; 217 break;
218 } 218 }
219 219
220 if (skb) 220 kfree_skb(skb);
221 kfree_skb(skb);
222} 221}
223 222
224static void 223static void
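
The rx.c change works because kfree_skb(), like kfree() and free(3), accepts
NULL and does nothing, so the caller-side guard was dead weight. A sketch of
the convention in plain C; the struct and helper are illustrative stand-ins.

#include <stdlib.h>

struct skb_model {
        char *data;
};

static void free_skb_model(struct skb_model *skb)
{
        if (!skb)               /* tolerate NULL so callers needn't check */
                return;
        free(skb->data);
        free(skb);
}

int main(void)
{
        free_skb_model(NULL);   /* fine: the guard lives in the callee */
        return 0;
}
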
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e22349ea7256..4692782b5280 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
869 depends on IPV6 || IPV6=n 869 depends on IPV6 || IPV6=n
870 depends on !NF_CONNTRACK || NF_CONNTRACK 870 depends on !NF_CONNTRACK || NF_CONNTRACK
871 select NF_DUP_IPV4 871 select NF_DUP_IPV4
872 select NF_DUP_IPV6 if IP6_NF_IPTABLES 872 select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
873 ---help--- 873 ---help---
874 This option adds a "TEE" target with which a packet can be cloned and 874 This option adds a "TEE" target with which a packet can be cloned and
875 this clone be rerouted to another nexthop. 875 this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
882 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n 882 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
883 depends on IP_NF_MANGLE 883 depends on IP_NF_MANGLE
884 select NF_DEFRAG_IPV4 884 select NF_DEFRAG_IPV4
885 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 885 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
886 help 886 help
887 This option adds a `TPROXY' target, which is somewhat similar to 887 This option adds a `TPROXY' target, which is somewhat similar to
888 REDIRECT. It can only be used in the mangle table and is useful 888 REDIRECT. It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
1375 depends on IPV6 || IPV6=n 1375 depends on IPV6 || IPV6=n
1376 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n 1376 depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
1377 select NF_DEFRAG_IPV4 1377 select NF_DEFRAG_IPV4
1378 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES 1378 select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
1379 help 1379 help
1380 This option adds a `socket' match, which can be used to match 1380 This option adds a `socket' match, which can be used to match
1381 packets for which a TCP or UDP socket lookup finds a valid socket. 1381 packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index d05e759ed0fa..b0bc475f641e 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -33,7 +33,7 @@
33#define mtype_gc IPSET_TOKEN(MTYPE, _gc) 33#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
34#define mtype MTYPE 34#define mtype MTYPE
35 35
36#define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id)) 36#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
37 37
38static void 38static void
39mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) 39mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
67 del_timer_sync(&map->gc); 67 del_timer_sync(&map->gc);
68 68
69 ip_set_free(map->members); 69 ip_set_free(map->members);
70 if (set->dsize) { 70 if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
71 if (set->extensions & IPSET_EXT_DESTROY) 71 mtype_ext_cleanup(set);
72 mtype_ext_cleanup(set); 72 ip_set_free(map);
73 ip_set_free(map->extensions);
74 }
75 kfree(map);
76 73
77 set->data = NULL; 74 set->data = NULL;
78} 75}
@@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
92{ 89{
93 const struct mtype *map = set->data; 90 const struct mtype *map = set->data;
94 struct nlattr *nested; 91 struct nlattr *nested;
92 size_t memsize = sizeof(*map) + map->memsize;
95 93
96 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 94 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
97 if (!nested) 95 if (!nested)
98 goto nla_put_failure; 96 goto nla_put_failure;
99 if (mtype_do_head(skb, map) || 97 if (mtype_do_head(skb, map) ||
100 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || 98 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
101 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, 99 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
102 htonl(sizeof(*map) +
103 map->memsize +
104 set->dsize * map->elements)))
105 goto nla_put_failure; 100 goto nla_put_failure;
106 if (unlikely(ip_set_put_flags(skb, set))) 101 if (unlikely(ip_set_put_flags(skb, set)))
107 goto nla_put_failure; 102 goto nla_put_failure;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 64a564334418..4783efff0bde 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
41/* Type structure */ 41/* Type structure */
42struct bitmap_ip { 42struct bitmap_ip {
43 void *members; /* the set members */ 43 void *members; /* the set members */
44 void *extensions; /* data extensions */
45 u32 first_ip; /* host byte order, included in range */ 44 u32 first_ip; /* host byte order, included in range */
46 u32 last_ip; /* host byte order, included in range */ 45 u32 last_ip; /* host byte order, included in range */
47 u32 elements; /* number of max elements in the set */ 46 u32 elements; /* number of max elements in the set */
@@ -49,6 +48,8 @@ struct bitmap_ip {
49 size_t memsize; /* members size */ 48 size_t memsize; /* members size */
50 u8 netmask; /* subnet netmask */ 49 u8 netmask; /* subnet netmask */
51 struct timer_list gc; /* garbage collection */ 50 struct timer_list gc; /* garbage collection */
51 unsigned char extensions[0] /* data extensions */
52 __aligned(__alignof__(u64));
52}; 53};
53 54
54/* ADT structure for generic function args */ 55/* ADT structure for generic function args */
@@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
224 map->members = ip_set_alloc(map->memsize); 225 map->members = ip_set_alloc(map->memsize);
225 if (!map->members) 226 if (!map->members)
226 return false; 227 return false;
227 if (set->dsize) {
228 map->extensions = ip_set_alloc(set->dsize * elements);
229 if (!map->extensions) {
230 kfree(map->members);
231 return false;
232 }
233 }
234 map->first_ip = first_ip; 228 map->first_ip = first_ip;
235 map->last_ip = last_ip; 229 map->last_ip = last_ip;
236 map->elements = elements; 230 map->elements = elements;
@@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
316 pr_debug("hosts %u, elements %llu\n", 310 pr_debug("hosts %u, elements %llu\n",
317 hosts, (unsigned long long)elements); 311 hosts, (unsigned long long)elements);
318 312
319 map = kzalloc(sizeof(*map), GFP_KERNEL); 313 set->dsize = ip_set_elem_len(set, tb, 0, 0);
314 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
320 if (!map) 315 if (!map)
321 return -ENOMEM; 316 return -ENOMEM;
322 317
323 map->memsize = bitmap_bytes(0, elements - 1); 318 map->memsize = bitmap_bytes(0, elements - 1);
324 set->variant = &bitmap_ip; 319 set->variant = &bitmap_ip;
325 set->dsize = ip_set_elem_len(set, tb, 0);
326 if (!init_map_ip(set, map, first_ip, last_ip, 320 if (!init_map_ip(set, map, first_ip, last_ip,
327 elements, hosts, netmask)) { 321 elements, hosts, netmask)) {
328 kfree(map); 322 kfree(map);
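
The bitmap_ip rework above folds the separately-allocated extensions area
into a trailing array of the map itself (extensions[0] __aligned(u64)), so
create and destroy each handle one allocation. A C99 model of the layout and
of the get_ext() macro from ip_set_bitmap_gen.h; the kernel's __aligned(8)
keeps u64 counters in the extension area safe, which calloc() after two
size_t members also happens to satisfy here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bitmap_model {
        size_t elements;
        size_t dsize;                   /* bytes per element extension */
        unsigned char extensions[];     /* C99 flexible array member */
};

static struct bitmap_model *bitmap_alloc(size_t elements, size_t dsize)
{
        /* one allocation instead of map plus map->extensions */
        struct bitmap_model *map =
                calloc(1, sizeof(*map) + elements * dsize);

        if (map) {
                map->elements = elements;
                map->dsize = dsize;
        }
        return map;
}

/* get_ext(set, map, id) from the header, modelled here */
static void *get_ext(struct bitmap_model *map, size_t id)
{
        return map->extensions + map->dsize * id;
}

int main(void)
{
        struct bitmap_model *map = bitmap_alloc(16, 8);

        if (!map)
                return 1;
        memset(get_ext(map, 3), 0xff, map->dsize);
        printf("ext[3] at byte offset %zu\n",
               (size_t)((unsigned char *)get_ext(map, 3) -
                        (unsigned char *)map));
        free(map);                      /* frees the extensions too */
        return 0;
}
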
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1430535118fb..29dde208381d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -47,24 +47,26 @@ enum {
47/* Type structure */ 47/* Type structure */
48struct bitmap_ipmac { 48struct bitmap_ipmac {
49 void *members; /* the set members */ 49 void *members; /* the set members */
50 void *extensions; /* MAC + data extensions */
51 u32 first_ip; /* host byte order, included in range */ 50 u32 first_ip; /* host byte order, included in range */
52 u32 last_ip; /* host byte order, included in range */ 51 u32 last_ip; /* host byte order, included in range */
53 u32 elements; /* number of max elements in the set */ 52 u32 elements; /* number of max elements in the set */
54 size_t memsize; /* members size */ 53 size_t memsize; /* members size */
55 struct timer_list gc; /* garbage collector */ 54 struct timer_list gc; /* garbage collector */
55 unsigned char extensions[0] /* MAC + data extensions */
56 __aligned(__alignof__(u64));
56}; 57};
57 58
58/* ADT structure for generic function args */ 59/* ADT structure for generic function args */
59struct bitmap_ipmac_adt_elem { 60struct bitmap_ipmac_adt_elem {
61 unsigned char ether[ETH_ALEN] __aligned(2);
60 u16 id; 62 u16 id;
61 unsigned char *ether; 63 u16 add_mac;
62}; 64};
63 65
64struct bitmap_ipmac_elem { 66struct bitmap_ipmac_elem {
65 unsigned char ether[ETH_ALEN]; 67 unsigned char ether[ETH_ALEN];
66 unsigned char filled; 68 unsigned char filled;
67} __attribute__ ((aligned)); 69} __aligned(__alignof__(u64));
68 70
69static inline u32 71static inline u32
70ip_to_id(const struct bitmap_ipmac *m, u32 ip) 72ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
72 return ip - m->first_ip; 74 return ip - m->first_ip;
73} 75}
74 76
75static inline struct bitmap_ipmac_elem * 77#define get_elem(extensions, id, dsize) \
76get_elem(void *extensions, u16 id, size_t dsize) 78 (struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
77{ 79
78 return (struct bitmap_ipmac_elem *)(extensions + id * dsize); 80#define get_const_elem(extensions, id, dsize) \
79} 81 (const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
80 82
81/* Common functions */ 83/* Common functions */
82 84
@@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
88 90
89 if (!test_bit(e->id, map->members)) 91 if (!test_bit(e->id, map->members))
90 return 0; 92 return 0;
91 elem = get_elem(map->extensions, e->id, dsize); 93 elem = get_const_elem(map->extensions, e->id, dsize);
92 if (elem->filled == MAC_FILLED) 94 if (e->add_mac && elem->filled == MAC_FILLED)
93 return !e->ether || 95 return ether_addr_equal(e->ether, elem->ether);
94 ether_addr_equal(e->ether, elem->ether);
95 /* Trigger kernel to fill out the ethernet address */ 96 /* Trigger kernel to fill out the ethernet address */
96 return -EAGAIN; 97 return -EAGAIN;
97} 98}
@@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
103 104
104 if (!test_bit(id, map->members)) 105 if (!test_bit(id, map->members))
105 return 0; 106 return 0;
106 elem = get_elem(map->extensions, id, dsize); 107 elem = get_const_elem(map->extensions, id, dsize);
107 /* Timer not started for the incomplete elements */ 108 /* Timer not started for the incomplete elements */
108 return elem->filled == MAC_FILLED; 109 return elem->filled == MAC_FILLED;
109} 110}
@@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
133 * and we can reuse it later when MAC is filled out, 134 * and we can reuse it later when MAC is filled out,
134 * possibly by the kernel 135 * possibly by the kernel
135 */ 136 */
136 if (e->ether) 137 if (e->add_mac)
137 ip_set_timeout_set(timeout, t); 138 ip_set_timeout_set(timeout, t);
138 else 139 else
139 *timeout = t; 140 *timeout = t;
@@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
150 elem = get_elem(map->extensions, e->id, dsize); 151 elem = get_elem(map->extensions, e->id, dsize);
151 if (test_bit(e->id, map->members)) { 152 if (test_bit(e->id, map->members)) {
152 if (elem->filled == MAC_FILLED) { 153 if (elem->filled == MAC_FILLED) {
153 if (e->ether && 154 if (e->add_mac &&
154 (flags & IPSET_FLAG_EXIST) && 155 (flags & IPSET_FLAG_EXIST) &&
155 !ether_addr_equal(e->ether, elem->ether)) { 156 !ether_addr_equal(e->ether, elem->ether)) {
156 /* memcpy isn't atomic */ 157 /* memcpy isn't atomic */
@@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
159 ether_addr_copy(elem->ether, e->ether); 160 ether_addr_copy(elem->ether, e->ether);
160 } 161 }
161 return IPSET_ADD_FAILED; 162 return IPSET_ADD_FAILED;
162 } else if (!e->ether) 163 } else if (!e->add_mac)
163 /* Already added without ethernet address */ 164 /* Already added without ethernet address */
164 return IPSET_ADD_FAILED; 165 return IPSET_ADD_FAILED;
165 /* Fill the MAC address and trigger the timer activation */ 166 /* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
168 ether_addr_copy(elem->ether, e->ether); 169 ether_addr_copy(elem->ether, e->ether);
169 elem->filled = MAC_FILLED; 170 elem->filled = MAC_FILLED;
170 return IPSET_ADD_START_STORED_TIMEOUT; 171 return IPSET_ADD_START_STORED_TIMEOUT;
171 } else if (e->ether) { 172 } else if (e->add_mac) {
172 /* We can store MAC too */ 173 /* We can store MAC too */
173 ether_addr_copy(elem->ether, e->ether); 174 ether_addr_copy(elem->ether, e->ether);
174 elem->filled = MAC_FILLED; 175 elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
191 u32 id, size_t dsize) 192 u32 id, size_t dsize)
192{ 193{
193 const struct bitmap_ipmac_elem *elem = 194 const struct bitmap_ipmac_elem *elem =
194 get_elem(map->extensions, id, dsize); 195 get_const_elem(map->extensions, id, dsize);
195 196
196 return nla_put_ipaddr4(skb, IPSET_ATTR_IP, 197 return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
197 htonl(map->first_ip + id)) || 198 htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
213{ 214{
214 struct bitmap_ipmac *map = set->data; 215 struct bitmap_ipmac *map = set->data;
215 ipset_adtfn adtfn = set->variant->adt[adt]; 216 ipset_adtfn adtfn = set->variant->adt[adt];
216 struct bitmap_ipmac_adt_elem e = { .id = 0 }; 217 struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
217 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); 218 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
218 u32 ip; 219 u32 ip;
219 220
@@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
231 return -EINVAL; 232 return -EINVAL;
232 233
233 e.id = ip_to_id(map, ip); 234 e.id = ip_to_id(map, ip);
234 e.ether = eth_hdr(skb)->h_source; 235 memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
235 236
236 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); 237 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
237} 238}
@@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
265 return -IPSET_ERR_BITMAP_RANGE; 266 return -IPSET_ERR_BITMAP_RANGE;
266 267
267 e.id = ip_to_id(map, ip); 268 e.id = ip_to_id(map, ip);
268 if (tb[IPSET_ATTR_ETHER]) 269 if (tb[IPSET_ATTR_ETHER]) {
269 e.ether = nla_data(tb[IPSET_ATTR_ETHER]); 270 memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
270 else 271 e.add_mac = 1;
271 e.ether = NULL; 272 }
272
273 ret = adtfn(set, &e, &ext, &ext, flags); 273 ret = adtfn(set, &e, &ext, &ext, flags);
274 274
275 return ip_set_eexist(ret, flags) ? 0 : ret; 275 return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
300 map->members = ip_set_alloc(map->memsize); 300 map->members = ip_set_alloc(map->memsize);
301 if (!map->members) 301 if (!map->members)
302 return false; 302 return false;
303 if (set->dsize) {
304 map->extensions = ip_set_alloc(set->dsize * elements);
305 if (!map->extensions) {
306 kfree(map->members);
307 return false;
308 }
309 }
310 map->first_ip = first_ip; 303 map->first_ip = first_ip;
311 map->last_ip = last_ip; 304 map->last_ip = last_ip;
312 map->elements = elements; 305 map->elements = elements;
@@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
361 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 354 if (elements > IPSET_BITMAP_MAX_RANGE + 1)
362 return -IPSET_ERR_BITMAP_RANGE_SIZE; 355 return -IPSET_ERR_BITMAP_RANGE_SIZE;
363 356
364 map = kzalloc(sizeof(*map), GFP_KERNEL); 357 set->dsize = ip_set_elem_len(set, tb,
358 sizeof(struct bitmap_ipmac_elem),
359 __alignof__(struct bitmap_ipmac_elem));
360 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
365 if (!map) 361 if (!map)
366 return -ENOMEM; 362 return -ENOMEM;
367 363
368 map->memsize = bitmap_bytes(0, elements - 1); 364 map->memsize = bitmap_bytes(0, elements - 1);
369 set->variant = &bitmap_ipmac; 365 set->variant = &bitmap_ipmac;
370 set->dsize = ip_set_elem_len(set, tb,
371 sizeof(struct bitmap_ipmac_elem));
372 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { 366 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
373 kfree(map); 367 kfree(map);
374 return -ENOMEM; 368 return -ENOMEM;
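
The bitmap_ipmac conversion above replaces the separately allocated extensions array with storage that trails the map structure itself, so a single ip_set_alloc() covers both header and elements and get_elem() reduces to pointer arithmetic. A minimal user-space sketch of that layout follows; demo_map, demo_elem and demo_create are illustrative names, not the kernel API.

#include <stdlib.h>

struct demo_map {
	size_t elements;		/* number of slots */
	size_t dsize;			/* per-element extension size */
	unsigned char extensions[];	/* data extensions, same allocation */
};

/* Mirrors the get_elem()/get_const_elem() macros: base + id * dsize. */
#define demo_elem(map, id) \
	((void *)((map)->extensions + (size_t)(id) * (map)->dsize))

static struct demo_map *demo_create(size_t elements, size_t dsize)
{
	/* One allocation for header plus extension area, as in the patch. */
	struct demo_map *map = calloc(1, sizeof(*map) + elements * dsize);

	if (!map)
		return NULL;
	map->elements = elements;
	map->dsize = dsize;
	return map;
}
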
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 5338ccd5da46..7f0c733358a4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
35/* Type structure */ 35/* Type structure */
36struct bitmap_port { 36struct bitmap_port {
37 void *members; /* the set members */ 37 void *members; /* the set members */
38 void *extensions; /* data extensions */
39 u16 first_port; /* host byte order, included in range */ 38 u16 first_port; /* host byte order, included in range */
40 u16 last_port; /* host byte order, included in range */ 39 u16 last_port; /* host byte order, included in range */
41 u32 elements; /* number of max elements in the set */ 40 u32 elements; /* number of max elements in the set */
42 size_t memsize; /* members size */ 41 size_t memsize; /* members size */
43 struct timer_list gc; /* garbage collection */ 42 struct timer_list gc; /* garbage collection */
43 unsigned char extensions[0] /* data extensions */
44 __aligned(__alignof__(u64));
44}; 45};
45 46
46/* ADT structure for generic function args */ 47/* ADT structure for generic function args */
@@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
209 map->members = ip_set_alloc(map->memsize); 210 map->members = ip_set_alloc(map->memsize);
210 if (!map->members) 211 if (!map->members)
211 return false; 212 return false;
212 if (set->dsize) {
213 map->extensions = ip_set_alloc(set->dsize * map->elements);
214 if (!map->extensions) {
215 kfree(map->members);
216 return false;
217 }
218 }
219 map->first_port = first_port; 213 map->first_port = first_port;
220 map->last_port = last_port; 214 map->last_port = last_port;
221 set->timeout = IPSET_NO_TIMEOUT; 215 set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
232{ 226{
233 struct bitmap_port *map; 227 struct bitmap_port *map;
234 u16 first_port, last_port; 228 u16 first_port, last_port;
229 u32 elements;
235 230
236 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || 231 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
237 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || 232 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
248 last_port = tmp; 243 last_port = tmp;
249 } 244 }
250 245
251 map = kzalloc(sizeof(*map), GFP_KERNEL); 246 elements = last_port - first_port + 1;
247 set->dsize = ip_set_elem_len(set, tb, 0, 0);
248 map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
252 if (!map) 249 if (!map)
253 return -ENOMEM; 250 return -ENOMEM;
254 251
255 map->elements = last_port - first_port + 1; 252 map->elements = elements;
256 map->memsize = bitmap_bytes(0, map->elements); 253 map->memsize = bitmap_bytes(0, map->elements);
257 set->variant = &bitmap_port; 254 set->variant = &bitmap_port;
258 set->dsize = ip_set_elem_len(set, tb, 0);
259 if (!init_map_port(set, map, first_port, last_port)) { 255 if (!init_map_port(set, map, first_port, last_port)) {
260 kfree(map); 256 kfree(map);
261 return -ENOMEM; 257 return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69ab9c2634e1..54f3d7cb23e6 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
364} 364}
365 365
366size_t 366size_t
367ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len) 367ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
368 size_t align)
368{ 369{
369 enum ip_set_ext_id id; 370 enum ip_set_ext_id id;
370 size_t offset = len;
371 u32 cadt_flags = 0; 371 u32 cadt_flags = 0;
372 372
373 if (tb[IPSET_ATTR_CADT_FLAGS]) 373 if (tb[IPSET_ATTR_CADT_FLAGS])
374 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); 374 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
375 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD) 375 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
376 set->flags |= IPSET_CREATE_FLAG_FORCEADD; 376 set->flags |= IPSET_CREATE_FLAG_FORCEADD;
377 if (!align)
378 align = 1;
377 for (id = 0; id < IPSET_EXT_ID_MAX; id++) { 379 for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
378 if (!add_extension(id, cadt_flags, tb)) 380 if (!add_extension(id, cadt_flags, tb))
379 continue; 381 continue;
380 offset = ALIGN(offset, ip_set_extensions[id].align); 382 len = ALIGN(len, ip_set_extensions[id].align);
381 set->offset[id] = offset; 383 set->offset[id] = len;
382 set->extensions |= ip_set_extensions[id].type; 384 set->extensions |= ip_set_extensions[id].type;
383 offset += ip_set_extensions[id].len; 385 len += ip_set_extensions[id].len;
384 } 386 }
385 return offset; 387 return ALIGN(len, align);
386} 388}
387EXPORT_SYMBOL_GPL(ip_set_elem_len); 389EXPORT_SYMBOL_GPL(ip_set_elem_len);
388 390
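
The reworked ip_set_elem_len() aligns each extension's offset to that extension's own requirement and then rounds the total up to the caller-supplied element alignment, so elements can be stacked back-to-back in the arrays the bitmap and hash types now embed. A small worked example of the packing arithmetic, using hypothetical extension sizes rather than the real extension table:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t len = 6;			/* e.g. sizeof() of the base element */
	size_t align = 8;		/* e.g. __alignof__() of the element */
	const size_t ext_len[]   = { 8, 4 };	/* hypothetical extensions */
	const size_t ext_align[] = { 8, 4 };

	for (int i = 0; i < 2; i++) {
		len = ALIGN(len, ext_align[i]);	/* place at aligned offset */
		printf("extension %d at offset %zu\n", i, len);
		len += ext_len[i];
	}
	/* Round the element size itself up so an array of them stays
	 * aligned: 6 -> 8 -> 16 -> 16 -> 20 -> dsize 24 here.
	 */
	printf("dsize = %zu\n", ALIGN(len, align));
	return 0;
}
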
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 691b54fcaf2a..e5336ab36d67 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -72,8 +72,9 @@ struct hbucket {
72 DECLARE_BITMAP(used, AHASH_MAX_TUNED); 72 DECLARE_BITMAP(used, AHASH_MAX_TUNED);
73 u8 size; /* size of the array */ 73 u8 size; /* size of the array */
74 u8 pos; /* position of the first free entry */ 74 u8 pos; /* position of the first free entry */
75 unsigned char value[0]; /* the array of the values */ 75 unsigned char value[0] /* the array of the values */
76} __attribute__ ((aligned)); 76 __aligned(__alignof__(u64));
77};
77 78
78/* The hash table: the table size stored here in order to make resizing easy */ 79/* The hash table: the table size stored here in order to make resizing easy */
79struct htable { 80struct htable {
@@ -475,7 +476,7 @@ static void
475mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) 476mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
476{ 477{
477 struct htable *t; 478 struct htable *t;
478 struct hbucket *n; 479 struct hbucket *n, *tmp;
479 struct mtype_elem *data; 480 struct mtype_elem *data;
480 u32 i, j, d; 481 u32 i, j, d;
481#ifdef IP_SET_HASH_WITH_NETS 482#ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
510 } 511 }
511 } 512 }
512 if (d >= AHASH_INIT_SIZE) { 513 if (d >= AHASH_INIT_SIZE) {
513 struct hbucket *tmp = kzalloc(sizeof(*tmp) + 514 if (d >= n->size) {
514 (n->size - AHASH_INIT_SIZE) * dsize, 515 rcu_assign_pointer(hbucket(t, i), NULL);
515 GFP_ATOMIC); 516 kfree_rcu(n, rcu);
517 continue;
518 }
519 tmp = kzalloc(sizeof(*tmp) +
520 (n->size - AHASH_INIT_SIZE) * dsize,
521 GFP_ATOMIC);
516 if (!tmp) 522 if (!tmp)
517 /* Still try to delete expired elements */ 523 /* Still try to delete expired elements */
518 continue; 524 continue;
@@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
522 continue; 528 continue;
523 data = ahash_data(n, j, dsize); 529 data = ahash_data(n, j, dsize);
524 memcpy(tmp->value + d * dsize, data, dsize); 530 memcpy(tmp->value + d * dsize, data, dsize);
525 set_bit(j, tmp->used); 531 set_bit(d, tmp->used);
526 d++; 532 d++;
527 } 533 }
528 tmp->pos = d; 534 tmp->pos = d;
@@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
1323#endif 1329#endif
1324 set->variant = &IPSET_TOKEN(HTYPE, 4_variant); 1330 set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
1325 set->dsize = ip_set_elem_len(set, tb, 1331 set->dsize = ip_set_elem_len(set, tb,
1326 sizeof(struct IPSET_TOKEN(HTYPE, 4_elem))); 1332 sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
1333 __alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
1327#ifndef IP_SET_PROTO_UNDEF 1334#ifndef IP_SET_PROTO_UNDEF
1328 } else { 1335 } else {
1329 set->variant = &IPSET_TOKEN(HTYPE, 6_variant); 1336 set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
1330 set->dsize = ip_set_elem_len(set, tb, 1337 set->dsize = ip_set_elem_len(set, tb,
1331 sizeof(struct IPSET_TOKEN(HTYPE, 6_elem))); 1338 sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
1339 __alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
1332 } 1340 }
1333#endif 1341#endif
1334 if (tb[IPSET_ATTR_TIMEOUT]) { 1342 if (tb[IPSET_ATTR_TIMEOUT]) {
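
Two separate fixes sit in the mtype_expire() hunk: a bucket left with no live entries is now unlinked and freed via kfree_rcu() instead of being shrunk, and the copy loop marks bit d, the slot in the new packed bucket, where it previously marked bit j, the slot in the old one. A user-space model of the compaction showing why the bit index matters, with plain bitmasks standing in for the kernel bitmap helpers:

#include <stdio.h>
#include <string.h>

#define NSLOTS 8

struct bucket {
	unsigned long used;	/* one bit per occupied slot */
	int value[NSLOTS];
};

/* Pack the live entries of src at the front of dst. */
static void compact(const struct bucket *src, struct bucket *dst)
{
	int d = 0;

	memset(dst, 0, sizeof(*dst));
	for (int j = 0; j < NSLOTS; j++) {
		if (!(src->used & (1UL << j)))
			continue;
		dst->value[d] = src->value[j];
		dst->used |= 1UL << d;	/* bit d: setting bit j would mark
					 * the wrong (old) slot as used */
		d++;
	}
}

int main(void)
{
	struct bucket src = { .used = (1UL << 1) | (1UL << 5) }, dst;

	src.value[1] = 10;
	src.value[5] = 50;
	compact(&src, &dst);
	printf("used=%#lx v0=%d v1=%d\n", dst.used, dst.value[0], dst.value[1]);
	return 0;	/* prints used=0x3 v0=10 v1=50 */
}
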
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a30ce6e8c90..bbede95c9f68 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -31,7 +31,7 @@ struct set_elem {
31 struct rcu_head rcu; 31 struct rcu_head rcu;
32 struct list_head list; 32 struct list_head list;
33 ip_set_id_t id; 33 ip_set_id_t id;
34}; 34} __aligned(__alignof__(u64));
35 35
36struct set_adt_elem { 36struct set_adt_elem {
37 ip_set_id_t id; 37 ip_set_id_t id;
@@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
618 size = IP_SET_LIST_MIN_SIZE; 618 size = IP_SET_LIST_MIN_SIZE;
619 619
620 set->variant = &set_variant; 620 set->variant = &set_variant;
621 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem)); 621 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
622 __alignof__(struct set_elem));
622 if (!init_list_set(net, set, size)) 623 if (!init_list_set(net, set, size))
623 return -ENOMEM; 624 return -ENOMEM;
624 if (tb[IPSET_ATTR_TIMEOUT]) { 625 if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1e24fff53e4b..f57b4dcdb233 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1176 struct ip_vs_protocol *pp; 1176 struct ip_vs_protocol *pp;
1177 struct ip_vs_proto_data *pd; 1177 struct ip_vs_proto_data *pd;
1178 struct ip_vs_conn *cp; 1178 struct ip_vs_conn *cp;
1179 struct sock *sk;
1179 1180
1180 EnterFunction(11); 1181 EnterFunction(11);
1181 1182
@@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1183 if (skb->ipvs_property) 1184 if (skb->ipvs_property)
1184 return NF_ACCEPT; 1185 return NF_ACCEPT;
1185 1186
1187 sk = skb_to_full_sk(skb);
1186 /* Bad... Do not break raw sockets */ 1188 /* Bad... Do not break raw sockets */
1187 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && 1189 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1188 af == AF_INET)) { 1190 af == AF_INET)) {
1189 struct sock *sk = skb->sk;
1190 struct inet_sock *inet = inet_sk(skb->sk);
1191 1191
1192 if (inet && sk->sk_family == PF_INET && inet->nodefrag) 1192 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1193 return NF_ACCEPT; 1193 return NF_ACCEPT;
1194 } 1194 }
1195 1195
@@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1681 struct ip_vs_conn *cp; 1681 struct ip_vs_conn *cp;
1682 int ret, pkts; 1682 int ret, pkts;
1683 int conn_reuse_mode; 1683 int conn_reuse_mode;
1684 struct sock *sk;
1684 1685
1685 /* Already marked as IPVS request or reply? */ 1686 /* Already marked as IPVS request or reply? */
1686 if (skb->ipvs_property) 1687 if (skb->ipvs_property)
@@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1708 ip_vs_fill_iph_skb(af, skb, false, &iph); 1709 ip_vs_fill_iph_skb(af, skb, false, &iph);
1709 1710
1710 /* Bad... Do not break raw sockets */ 1711 /* Bad... Do not break raw sockets */
1711 if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT && 1712 sk = skb_to_full_sk(skb);
1713 if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
1712 af == AF_INET)) { 1714 af == AF_INET)) {
1713 struct sock *sk = skb->sk;
1714 struct inet_sock *inet = inet_sk(skb->sk);
1715 1715
1716 if (inet && sk->sk_family == PF_INET && inet->nodefrag) 1716 if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
1717 return NF_ACCEPT; 1717 return NF_ACCEPT;
1718 } 1718 }
1719 1719
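
Both ip_vs_out() and ip_vs_in() now resolve skb->sk through skb_to_full_sk() before touching inet fields: with early demux the attached socket can be a request or timewait minisock that lacks them. A toy model of that guard, not the kernel's socket hierarchy:

#include <stdio.h>

enum sock_state { SS_FULL, SS_REQUEST, SS_TIMEWAIT };

struct mini_sock {
	enum sock_state state;
	struct mini_sock *listener;	/* full socket, for request socks */
};

/* Resolve to a full socket or NULL, like skb_to_full_sk(). */
static struct mini_sock *to_full_sk(struct mini_sock *sk)
{
	if (!sk)
		return NULL;
	if (sk->state == SS_FULL)
		return sk;
	if (sk->state == SS_REQUEST)
		return sk->listener;	/* dereference the listener instead */
	return NULL;			/* timewait: no full socket */
}

int main(void)
{
	struct mini_sock listener = { SS_FULL, NULL };
	struct mini_sock req = { SS_REQUEST, &listener };

	printf("%d\n", to_full_sk(&req) == &listener);	/* 1 */
	return 0;
}
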
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 06eb48fceb42..740cce4685ac 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
825 struct net *net = sock_net(ctnl); 825 struct net *net = sock_net(ctnl);
826 struct nfnl_log_net *log = nfnl_log_pernet(net); 826 struct nfnl_log_net *log = nfnl_log_pernet(net);
827 int ret = 0; 827 int ret = 0;
828 u16 flags; 828 u16 flags = 0;
829 829
830 if (nfula[NFULA_CFG_CMD]) { 830 if (nfula[NFULA_CFG_CMD]) {
831 u_int8_t pf = nfmsg->nfgen_family; 831 u_int8_t pf = nfmsg->nfgen_family;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1067fb4c1ffa..c7808fc19719 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
47 local_bh_enable(); 47 local_bh_enable();
48} 48}
49 49
50static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) 50static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
51 struct nft_counter *total)
51{ 52{
52 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); 53 const struct nft_counter_percpu *cpu_stats;
53 struct nft_counter_percpu *cpu_stats;
54 struct nft_counter total;
55 u64 bytes, packets; 54 u64 bytes, packets;
56 unsigned int seq; 55 unsigned int seq;
57 int cpu; 56 int cpu;
58 57
59 memset(&total, 0, sizeof(total)); 58 memset(total, 0, sizeof(*total));
60 for_each_possible_cpu(cpu) { 59 for_each_possible_cpu(cpu) {
61 cpu_stats = per_cpu_ptr(priv->counter, cpu); 60 cpu_stats = per_cpu_ptr(counter, cpu);
62 do { 61 do {
63 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 62 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
64 bytes = cpu_stats->counter.bytes; 63 bytes = cpu_stats->counter.bytes;
65 packets = cpu_stats->counter.packets; 64 packets = cpu_stats->counter.packets;
66 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); 65 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
67 66
68 total.packets += packets; 67 total->packets += packets;
69 total.bytes += bytes; 68 total->bytes += bytes;
70 } 69 }
70}
71
72static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
73{
74 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
75 struct nft_counter total;
76
77 nft_counter_fetch(priv->counter, &total);
71 78
72 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || 79 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
73 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) 80 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
118 free_percpu(priv->counter); 125 free_percpu(priv->counter);
119} 126}
120 127
128static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
129{
130 struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
131 struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
132 struct nft_counter_percpu __percpu *cpu_stats;
133 struct nft_counter_percpu *this_cpu;
134 struct nft_counter total;
135
136 nft_counter_fetch(priv->counter, &total);
137
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC);
140 if (cpu_stats == NULL)
141 return -ENOMEM;
142
143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats);
145 this_cpu->counter.packets = total.packets;
146 this_cpu->counter.bytes = total.bytes;
147 preempt_enable();
148
149 priv_clone->counter = cpu_stats;
150 return 0;
151}
152
121static struct nft_expr_type nft_counter_type; 153static struct nft_expr_type nft_counter_type;
122static const struct nft_expr_ops nft_counter_ops = { 154static const struct nft_expr_ops nft_counter_ops = {
123 .type = &nft_counter_type, 155 .type = &nft_counter_type,
@@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
126 .init = nft_counter_init, 158 .init = nft_counter_init,
127 .destroy = nft_counter_destroy, 159 .destroy = nft_counter_destroy,
128 .dump = nft_counter_dump, 160 .dump = nft_counter_dump,
161 .clone = nft_counter_clone,
129}; 162};
130 163
131static struct nft_expr_type nft_counter_type __read_mostly = { 164static struct nft_expr_type nft_counter_type __read_mostly = {
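
The nft_counter refactor splits the per-CPU summation out of the dump path into nft_counter_fetch() so the new .clone operation can reuse it, seeding one CPU slot of the clone's fresh per-CPU storage with the source's running totals. A simplified single-threaded model, with arrays standing in for per-CPU storage and the u64_stats seqcount retry loop omitted:

#include <stdio.h>

#define NCPU 4

struct counter { unsigned long long packets, bytes; };

/* Sum every CPU's slot into *total (nft_counter_fetch() analogue). */
static void fetch(const struct counter percpu[NCPU], struct counter *total)
{
	total->packets = 0;
	total->bytes = 0;
	for (int cpu = 0; cpu < NCPU; cpu++) {
		total->packets += percpu[cpu].packets;
		total->bytes += percpu[cpu].bytes;
	}
}

/* Seed a fresh clone with the aggregate, parked on one slot. */
static void clone_counters(const struct counter src[NCPU],
			   struct counter dst[NCPU])
{
	struct counter total;

	fetch(src, &total);
	for (int cpu = 0; cpu < NCPU; cpu++)
		dst[cpu] = (struct counter){ 0, 0 };
	dst[0] = total;
}

int main(void)
{
	struct counter src[NCPU] = { {1, 100}, {2, 200}, {0, 0}, {3, 300} };
	struct counter dst[NCPU];

	clone_counters(src, dst);
	printf("%llu packets, %llu bytes\n", dst[0].packets, dst[0].bytes);
	return 0;	/* 6 packets, 600 bytes */
}
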
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 513a8ef60a59..9dec3bd1b63c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
50 } 50 }
51 51
52 ext = nft_set_elem_ext(set, elem); 52 ext = nft_set_elem_ext(set, elem);
53 if (priv->expr != NULL) 53 if (priv->expr != NULL &&
54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr); 54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
55 return NULL;
55 56
56 return elem; 57 return elem;
57} 58}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af399cac5205..1cf928fb573e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
1741 kfree_rcu(po->rollover, rcu); 1741 kfree_rcu(po->rollover, rcu);
1742} 1742}
1743 1743
1744static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1745 struct sk_buff *skb)
1746{
1747 /* Earlier code assumed this would be a VLAN pkt, double-check
1748 * this now that we have the actual packet in hand. We can only
1749 * do this check on Ethernet devices.
1750 */
1751 if (unlikely(dev->type != ARPHRD_ETHER))
1752 return false;
1753
1754 skb_reset_mac_header(skb);
1755 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1756}
1757
1744static const struct proto_ops packet_ops; 1758static const struct proto_ops packet_ops;
1745 1759
1746static const struct proto_ops packet_ops_spkt; 1760static const struct proto_ops packet_ops_spkt;
@@ -1902,18 +1916,10 @@ retry:
1902 goto retry; 1916 goto retry;
1903 } 1917 }
1904 1918
1905 if (len > (dev->mtu + dev->hard_header_len + extra_len)) { 1919 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1906 /* Earlier code assumed this would be a VLAN pkt, 1920 !packet_extra_vlan_len_allowed(dev, skb)) {
1907 * double-check this now that we have the actual 1921 err = -EMSGSIZE;
1908 * packet in hand. 1922 goto out_unlock;
1909 */
1910 struct ethhdr *ehdr;
1911 skb_reset_mac_header(skb);
1912 ehdr = eth_hdr(skb);
1913 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1914 err = -EMSGSIZE;
1915 goto out_unlock;
1916 }
1917 } 1923 }
1918 1924
1919 skb->protocol = proto; 1925 skb->protocol = proto;
@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
2332 return false; 2338 return false;
2333} 2339}
2334 2340
2341static void tpacket_set_protocol(const struct net_device *dev,
2342 struct sk_buff *skb)
2343{
2344 if (dev->type == ARPHRD_ETHER) {
2345 skb_reset_mac_header(skb);
2346 skb->protocol = eth_hdr(skb)->h_proto;
2347 }
2348}
2349
2335static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2350static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2336 void *frame, struct net_device *dev, int size_max, 2351 void *frame, struct net_device *dev, int size_max,
2337 __be16 proto, unsigned char *addr, int hlen) 2352 __be16 proto, unsigned char *addr, int hlen)
@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2368 skb_reserve(skb, hlen); 2383 skb_reserve(skb, hlen);
2369 skb_reset_network_header(skb); 2384 skb_reset_network_header(skb);
2370 2385
2371 if (!packet_use_direct_xmit(po))
2372 skb_probe_transport_header(skb, 0);
2373 if (unlikely(po->tp_tx_has_off)) { 2386 if (unlikely(po->tp_tx_has_off)) {
2374 int off_min, off_max, off; 2387 int off_min, off_max, off;
2375 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2388 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2415 dev->hard_header_len); 2428 dev->hard_header_len);
2416 if (unlikely(err)) 2429 if (unlikely(err))
2417 return err; 2430 return err;
2431 if (!skb->protocol)
2432 tpacket_set_protocol(dev, skb);
2418 2433
2419 data += dev->hard_header_len; 2434 data += dev->hard_header_len;
2420 to_write -= dev->hard_header_len; 2435 to_write -= dev->hard_header_len;
@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2449 len = ((to_write > len_max) ? len_max : to_write); 2464 len = ((to_write > len_max) ? len_max : to_write);
2450 } 2465 }
2451 2466
2467 skb_probe_transport_header(skb, 0);
2468
2452 return tp_len; 2469 return tp_len;
2453} 2470}
2454 2471
@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2493 if (unlikely(!(dev->flags & IFF_UP))) 2510 if (unlikely(!(dev->flags & IFF_UP)))
2494 goto out_put; 2511 goto out_put;
2495 2512
2496 reserve = dev->hard_header_len + VLAN_HLEN; 2513 if (po->sk.sk_socket->type == SOCK_RAW)
2514 reserve = dev->hard_header_len;
2497 size_max = po->tx_ring.frame_size 2515 size_max = po->tx_ring.frame_size
2498 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2516 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2499 2517
2500 if (size_max > dev->mtu + reserve) 2518 if (size_max > dev->mtu + reserve + VLAN_HLEN)
2501 size_max = dev->mtu + reserve; 2519 size_max = dev->mtu + reserve + VLAN_HLEN;
2502 2520
2503 do { 2521 do {
2504 ph = packet_current_frame(po, &po->tx_ring, 2522 ph = packet_current_frame(po, &po->tx_ring,
@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2525 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2543 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2526 addr, hlen); 2544 addr, hlen);
2527 if (likely(tp_len >= 0) && 2545 if (likely(tp_len >= 0) &&
2528 tp_len > dev->mtu + dev->hard_header_len) { 2546 tp_len > dev->mtu + reserve &&
2529 struct ethhdr *ehdr; 2547 !packet_extra_vlan_len_allowed(dev, skb))
2530 /* Earlier code assumed this would be a VLAN pkt, 2548 tp_len = -EMSGSIZE;
2531 * double-check this now that we have the actual
2532 * packet in hand.
2533 */
2534 2549
2535 skb_reset_mac_header(skb);
2536 ehdr = eth_hdr(skb);
2537 if (ehdr->h_proto != htons(ETH_P_8021Q))
2538 tp_len = -EMSGSIZE;
2539 }
2540 if (unlikely(tp_len < 0)) { 2550 if (unlikely(tp_len < 0)) {
2541 if (po->tp_loss) { 2551 if (po->tp_loss) {
2542 __packet_set_status(po, ph, 2552 __packet_set_status(po, ph,
@@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2765 2775
2766 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); 2776 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2767 2777
2768 if (!gso_type && (len > dev->mtu + reserve + extra_len)) { 2778 if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2769 /* Earlier code assumed this would be a VLAN pkt, 2779 !packet_extra_vlan_len_allowed(dev, skb)) {
2770 * double-check this now that we have the actual 2780 err = -EMSGSIZE;
2771 * packet in hand. 2781 goto out_free;
2772 */
2773 struct ethhdr *ehdr;
2774 skb_reset_mac_header(skb);
2775 ehdr = eth_hdr(skb);
2776 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2777 err = -EMSGSIZE;
2778 goto out_free;
2779 }
2780 } 2782 }
2781 2783
2782 skb->protocol = proto; 2784 skb->protocol = proto;
@@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2807 len += vnet_hdr_len; 2809 len += vnet_hdr_len;
2808 } 2810 }
2809 2811
2810 if (!packet_use_direct_xmit(po)) 2812 skb_probe_transport_header(skb, reserve);
2811 skb_probe_transport_header(skb, reserve); 2813
2812 if (unlikely(extra_len == 4)) 2814 if (unlikely(extra_len == 4))
2813 skb->no_fcs = 1; 2815 skb->no_fcs = 1;
2814 2816
@@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4107 err = -EINVAL; 4109 err = -EINVAL;
4108 if (unlikely((int)req->tp_block_size <= 0)) 4110 if (unlikely((int)req->tp_block_size <= 0))
4109 goto out; 4111 goto out;
4110 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 4112 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4111 goto out; 4113 goto out;
4112 if (po->tp_version >= TPACKET_V3 && 4114 if (po->tp_version >= TPACKET_V3 &&
4113 (int)(req->tp_block_size - 4115 (int)(req->tp_block_size -
@@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4119 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4121 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4120 goto out; 4122 goto out;
4121 4123
4122 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; 4124 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4123 if (unlikely(rb->frames_per_block <= 0)) 4125 if (unlikely(rb->frames_per_block == 0))
4124 goto out; 4126 goto out;
4125 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4127 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4126 req->tp_frame_nr)) 4128 req->tp_frame_nr))
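
The three af_packet send paths now share packet_extra_vlan_len_allowed(): a frame may exceed mtu + hard_header_len only on an Ethernet device and only when it really carries an 802.1Q tag, and tpacket_snd() sizes its bound with the SOCK_RAW-dependent reserve. A standalone sketch of the length rule, simplified to a raw Ethernet header with illustrative constants:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100

/* May a frame of this length go out, given the device MTU? */
static bool frame_len_ok(const uint8_t *frame, size_t len, size_t mtu)
{
	uint16_t proto;

	if (len <= mtu + ETH_HLEN)
		return true;			/* within the normal bound */
	if (len > mtu + ETH_HLEN + VLAN_HLEN)
		return false;			/* too long even for a tag */
	/* EtherType lives right after the two MAC addresses. */
	proto = (uint16_t)(frame[12] << 8 | frame[13]);
	return proto == ETH_P_8021Q;		/* extra room is the tag */
}

int main(void)
{
	uint8_t vlan[1518]  = { [12] = 0x81, [13] = 0x00 };	/* tagged */
	uint8_t plain[1518] = { [12] = 0x08, [13] = 0x00 };	/* IPv4 */

	printf("%d %d\n", frame_len_ok(vlan, 1518, 1500),
	       frame_len_ok(plain, 1518, 1500));	/* 1 0 */
	return 0;
}
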
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4f15b7d730e1..1543e39f47c3 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
809 if (!has_sha1) 809 if (!has_sha1)
810 return -EINVAL; 810 return -EINVAL;
811 811
812 memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], 812 for (i = 0; i < hmacs->shmac_num_idents; i++)
813 hmacs->shmac_num_idents * sizeof(__u16)); 813 ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + 814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
815 hmacs->shmac_num_idents * sizeof(__u16)); 815 hmacs->shmac_num_idents * sizeof(__u16));
816 return 0; 816 return 0;
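
The sctp_auth_ep_set_hmacs() change matters on little-endian hosts: the identifiers arrive from user space in host byte order while hmac_ids is wire format, so each 16-bit value needs its own htons(); the old bulk memcpy() leaked host endianness onto the wire. A quick demonstration with made-up identifier values:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t host_ids[2] = { 1, 3 };	/* host byte order */
	uint16_t wire_ids[2];
	uint8_t raw[4];

	for (int i = 0; i < 2; i++)
		wire_ids[i] = htons(host_ids[i]);	/* per-element swap */
	memcpy(raw, wire_ids, sizeof(raw));
	/* Prints 00 01 00 03 on any host: network byte order. Copying
	 * host_ids directly would emit 01 00 03 00 on little-endian
	 * machines, i.e. the wrong wire format.
	 */
	printf("%02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);
	return 0;
}
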
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dace13d7638e..799e65b944b9 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1411,17 +1411,16 @@ gss_key_timeout(struct rpc_cred *rc)
1411{ 1411{
1412 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); 1412 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1413 struct gss_cl_ctx *ctx; 1413 struct gss_cl_ctx *ctx;
1414 unsigned long now = jiffies; 1414 unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1415 unsigned long expire; 1415 int ret = 0;
1416 1416
1417 rcu_read_lock(); 1417 rcu_read_lock();
1418 ctx = rcu_dereference(gss_cred->gc_ctx); 1418 ctx = rcu_dereference(gss_cred->gc_ctx);
1419 if (ctx) 1419 if (!ctx || time_after(timeout, ctx->gc_expiry))
1420 expire = ctx->gc_expiry - (gss_key_expire_timeo * HZ); 1420 ret = -EACCES;
1421 rcu_read_unlock(); 1421 rcu_read_unlock();
1422 if (!ctx || time_after(now, expire)) 1422
1423 return -EACCES; 1423 return ret;
1424 return 0;
1425} 1424}
1426 1425
1427static int 1426static int
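
Beyond restructuring, the gss_key_timeout() rewrite avoids reading 'expire' uninitialized when no context is attached and keeps the comparison inside the RCU read section. It leans on time_after(), which stays correct across jiffies wraparound by comparing through a signed difference; a minimal model:

#include <stdio.h>

/* Same shape as the kernel macro: true if a is after b, wrap-safe. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;	/* just before wrap */
	unsigned long past_wrap = 5;			/* just after wrap */

	printf("%d\n", time_after(past_wrap, near_wrap));	/* 1: later */
	printf("%d\n", past_wrap > near_wrap);			/* 0: naive */
	return 0;
}
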
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 4a2340a54401..5e4f815c2b34 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -41,13 +41,16 @@
41static bool cache_defer_req(struct cache_req *req, struct cache_head *item); 41static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
42static void cache_revisit_request(struct cache_head *item); 42static void cache_revisit_request(struct cache_head *item);
43 43
44static void cache_init(struct cache_head *h) 44static void cache_init(struct cache_head *h, struct cache_detail *detail)
45{ 45{
46 time_t now = seconds_since_boot(); 46 time_t now = seconds_since_boot();
47 INIT_HLIST_NODE(&h->cache_list); 47 INIT_HLIST_NODE(&h->cache_list);
48 h->flags = 0; 48 h->flags = 0;
49 kref_init(&h->ref); 49 kref_init(&h->ref);
50 h->expiry_time = now + CACHE_NEW_EXPIRY; 50 h->expiry_time = now + CACHE_NEW_EXPIRY;
51 if (now <= detail->flush_time)
52 /* ensure it isn't already expired */
53 now = detail->flush_time + 1;
51 h->last_refresh = now; 54 h->last_refresh = now;
52} 55}
53 56
@@ -81,7 +84,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
81 * we might lose it if we need to 84 * we might lose it if we need to
82 * cache_put it soon. 85 * cache_put it soon.
83 */ 86 */
84 cache_init(new); 87 cache_init(new, detail);
85 detail->init(new, key); 88 detail->init(new, key);
86 89
87 write_lock(&detail->hash_lock); 90 write_lock(&detail->hash_lock);
@@ -116,10 +119,15 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
116 119
117static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); 120static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
118 121
119static void cache_fresh_locked(struct cache_head *head, time_t expiry) 122static void cache_fresh_locked(struct cache_head *head, time_t expiry,
123 struct cache_detail *detail)
120{ 124{
125 time_t now = seconds_since_boot();
126 if (now <= detail->flush_time)
127 /* ensure it isn't immediately treated as expired */
128 now = detail->flush_time + 1;
121 head->expiry_time = expiry; 129 head->expiry_time = expiry;
122 head->last_refresh = seconds_since_boot(); 130 head->last_refresh = now;
123 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ 131 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
124 set_bit(CACHE_VALID, &head->flags); 132 set_bit(CACHE_VALID, &head->flags);
125} 133}
@@ -149,7 +157,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
149 set_bit(CACHE_NEGATIVE, &old->flags); 157 set_bit(CACHE_NEGATIVE, &old->flags);
150 else 158 else
151 detail->update(old, new); 159 detail->update(old, new);
152 cache_fresh_locked(old, new->expiry_time); 160 cache_fresh_locked(old, new->expiry_time, detail);
153 write_unlock(&detail->hash_lock); 161 write_unlock(&detail->hash_lock);
154 cache_fresh_unlocked(old, detail); 162 cache_fresh_unlocked(old, detail);
155 return old; 163 return old;
@@ -162,7 +170,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
162 cache_put(old, detail); 170 cache_put(old, detail);
163 return NULL; 171 return NULL;
164 } 172 }
165 cache_init(tmp); 173 cache_init(tmp, detail);
166 detail->init(tmp, old); 174 detail->init(tmp, old);
167 175
168 write_lock(&detail->hash_lock); 176 write_lock(&detail->hash_lock);
@@ -173,8 +181,8 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
173 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); 181 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
174 detail->entries++; 182 detail->entries++;
175 cache_get(tmp); 183 cache_get(tmp);
176 cache_fresh_locked(tmp, new->expiry_time); 184 cache_fresh_locked(tmp, new->expiry_time, detail);
177 cache_fresh_locked(old, 0); 185 cache_fresh_locked(old, 0, detail);
178 write_unlock(&detail->hash_lock); 186 write_unlock(&detail->hash_lock);
179 cache_fresh_unlocked(tmp, detail); 187 cache_fresh_unlocked(tmp, detail);
180 cache_fresh_unlocked(old, detail); 188 cache_fresh_unlocked(old, detail);
@@ -219,7 +227,8 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
219 rv = cache_is_valid(h); 227 rv = cache_is_valid(h);
220 if (rv == -EAGAIN) { 228 if (rv == -EAGAIN) {
221 set_bit(CACHE_NEGATIVE, &h->flags); 229 set_bit(CACHE_NEGATIVE, &h->flags);
222 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); 230 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
231 detail);
223 rv = -ENOENT; 232 rv = -ENOENT;
224 } 233 }
225 write_unlock(&detail->hash_lock); 234 write_unlock(&detail->hash_lock);
@@ -487,10 +496,13 @@ EXPORT_SYMBOL_GPL(cache_flush);
487 496
488void cache_purge(struct cache_detail *detail) 497void cache_purge(struct cache_detail *detail)
489{ 498{
490 detail->flush_time = LONG_MAX; 499 time_t now = seconds_since_boot();
500 if (detail->flush_time >= now)
501 now = detail->flush_time + 1;
502 /* 'now' is the maximum value any 'last_refresh' can have */
503 detail->flush_time = now;
491 detail->nextcheck = seconds_since_boot(); 504 detail->nextcheck = seconds_since_boot();
492 cache_flush(); 505 cache_flush();
493 detail->flush_time = 1;
494} 506}
495EXPORT_SYMBOL_GPL(cache_purge); 507EXPORT_SYMBOL_GPL(cache_purge);
496 508
@@ -1436,6 +1448,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
1436{ 1448{
1437 char tbuf[20]; 1449 char tbuf[20];
1438 char *bp, *ep; 1450 char *bp, *ep;
1451 time_t then, now;
1439 1452
1440 if (*ppos || count > sizeof(tbuf)-1) 1453 if (*ppos || count > sizeof(tbuf)-1)
1441 return -EINVAL; 1454 return -EINVAL;
@@ -1447,8 +1460,22 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
1447 return -EINVAL; 1460 return -EINVAL;
1448 1461
1449 bp = tbuf; 1462 bp = tbuf;
1450 cd->flush_time = get_expiry(&bp); 1463 then = get_expiry(&bp);
1451 cd->nextcheck = seconds_since_boot(); 1464 now = seconds_since_boot();
1465 cd->nextcheck = now;
1466 /* Can only set flush_time to 1 second beyond "now", or
1467 * possibly 1 second beyond flush_time. This is because
1468 * flush_time never goes backwards so it mustn't get too far
1469 * ahead of time.
1470 */
1471 if (then >= now) {
1472 /* Want to flush everything, so behave like cache_purge() */
1473 if (cd->flush_time >= now)
1474 now = cd->flush_time + 1;
1475 then = now;
1476 }
1477
1478 cd->flush_time = then;
1452 cache_flush(); 1479 cache_flush();
1453 1480
1454 *ppos += count; 1481 *ppos += count;
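
The common thread in the sunrpc/cache hunks is that flush_time only ever moves forward and freshly stamped entries must land strictly after it; otherwise an entry created or refreshed around a flush could look expired from birth. A tiny model of the stamping rule shared by cache_init(), cache_fresh_locked(), cache_purge() and write_flush():

#include <stdio.h>

/* Pick a last_refresh value that is not already flushed. */
static long stamp_refresh(long now, long flush_time)
{
	if (now <= flush_time)
		now = flush_time + 1;	/* ensure it isn't already expired */
	return now;
}

int main(void)
{
	printf("%ld\n", stamp_refresh(100, 50));	/* 100: normal case */
	printf("%ld\n", stamp_refresh(100, 100));	/* 101: bumped past */
	return 0;
}
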
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 0c8120229a03..1413cdcc131c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -181,7 +181,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
181 struct page **ppage = xdr->pages; 181 struct page **ppage = xdr->pages;
182 size_t base = xdr->page_base; 182 size_t base = xdr->page_base;
183 unsigned int pglen = xdr->page_len; 183 unsigned int pglen = xdr->page_len;
184 unsigned int flags = MSG_MORE; 184 unsigned int flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
185 int slen; 185 int slen;
186 int len = 0; 186 int len = 0;
187 187
@@ -399,6 +399,31 @@ static int svc_sock_secure_port(struct svc_rqst *rqstp)
399 return svc_port_is_privileged(svc_addr(rqstp)); 399 return svc_port_is_privileged(svc_addr(rqstp));
400} 400}
401 401
402static bool sunrpc_waitqueue_active(wait_queue_head_t *wq)
403{
404 if (!wq)
405 return false;
406 /*
407 * There should normally be a memory barrier here--see
408 * wq_has_sleeper().
409 *
410 * It appears that isn't currently necessary, though, basically
411 * because callers all appear to have sufficient memory barriers
412 * between the time the relevant change is made and the
413 * time they call these callbacks.
414 *
415 * The nfsd code itself doesn't actually explicitly wait on
416 * these waitqueues, but it may wait on them for example in
417 * sendpage() or sendmsg() calls. (And those may be the only
418 * places, since it uses nonblocking reads.)
419 *
420 * Maybe we should add the memory barriers anyway, but these are
421 * hot paths so we'd need to be convinced there's no significant
422 * penalty.
423 */
424 return waitqueue_active(wq);
425}
426
402/* 427/*
403 * INET callback when data has been received on the socket. 428 * INET callback when data has been received on the socket.
404 */ 429 */
@@ -414,7 +439,7 @@ static void svc_udp_data_ready(struct sock *sk)
414 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 439 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
415 svc_xprt_enqueue(&svsk->sk_xprt); 440 svc_xprt_enqueue(&svsk->sk_xprt);
416 } 441 }
417 if (wq && waitqueue_active(wq)) 442 if (sunrpc_waitqueue_active(wq))
418 wake_up_interruptible(wq); 443 wake_up_interruptible(wq);
419} 444}
420 445
@@ -432,7 +457,7 @@ static void svc_write_space(struct sock *sk)
432 svc_xprt_enqueue(&svsk->sk_xprt); 457 svc_xprt_enqueue(&svsk->sk_xprt);
433 } 458 }
434 459
435 if (wq && waitqueue_active(wq)) { 460 if (sunrpc_waitqueue_active(wq)) {
436 dprintk("RPC svc_write_space: someone sleeping on %p\n", 461 dprintk("RPC svc_write_space: someone sleeping on %p\n",
437 svsk); 462 svsk);
438 wake_up_interruptible(wq); 463 wake_up_interruptible(wq);
@@ -787,7 +812,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
787 } 812 }
788 813
789 wq = sk_sleep(sk); 814 wq = sk_sleep(sk);
790 if (wq && waitqueue_active(wq)) 815 if (sunrpc_waitqueue_active(wq))
791 wake_up_interruptible_all(wq); 816 wake_up_interruptible_all(wq);
792} 817}
793 818
@@ -808,7 +833,7 @@ static void svc_tcp_state_change(struct sock *sk)
808 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 833 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
809 svc_xprt_enqueue(&svsk->sk_xprt); 834 svc_xprt_enqueue(&svsk->sk_xprt);
810 } 835 }
811 if (wq && waitqueue_active(wq)) 836 if (sunrpc_waitqueue_active(wq))
812 wake_up_interruptible_all(wq); 837 wake_up_interruptible_all(wq);
813} 838}
814 839
@@ -823,7 +848,7 @@ static void svc_tcp_data_ready(struct sock *sk)
823 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 848 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
824 svc_xprt_enqueue(&svsk->sk_xprt); 849 svc_xprt_enqueue(&svsk->sk_xprt);
825 } 850 }
826 if (wq && waitqueue_active(wq)) 851 if (sunrpc_waitqueue_active(wq))
827 wake_up_interruptible(wq); 852 wake_up_interruptible(wq);
828} 853}
829 854
@@ -1367,7 +1392,6 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
1367 1392
1368/* 1393/*
1369 * Initialize socket for RPC use and create svc_sock struct 1394 * Initialize socket for RPC use and create svc_sock struct
1370 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1371 */ 1395 */
1372static struct svc_sock *svc_setup_socket(struct svc_serv *serv, 1396static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1373 struct socket *sock, 1397 struct socket *sock,
@@ -1594,7 +1618,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
1594 sk->sk_write_space = svsk->sk_owspace; 1618 sk->sk_write_space = svsk->sk_owspace;
1595 1619
1596 wq = sk_sleep(sk); 1620 wq = sk_sleep(sk);
1597 if (wq && waitqueue_active(wq)) 1621 if (sunrpc_waitqueue_active(wq))
1598 wake_up_interruptible(wq); 1622 wake_up_interruptible(wq);
1599} 1623}
1600 1624
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 9dc239dfe192..e401108360a2 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -332,131 +332,15 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
332 tipc_sk_rcv(net, inputq); 332 tipc_sk_rcv(net, inputq);
333} 333}
334 334
335static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
336 struct tipc_stats *stats)
337{
338 int i;
339 struct nlattr *nest;
340
341 struct nla_map {
342 __u32 key;
343 __u32 val;
344 };
345
346 struct nla_map map[] = {
347 {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
348 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
349 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
350 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
351 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
352 {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
353 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
354 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
355 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
356 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
357 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
358 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
359 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
360 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
361 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
362 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
363 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
364 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
365 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
366 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
367 };
368
369 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
370 if (!nest)
371 return -EMSGSIZE;
372
373 for (i = 0; i < ARRAY_SIZE(map); i++)
374 if (nla_put_u32(skb, map[i].key, map[i].val))
375 goto msg_full;
376
377 nla_nest_end(skb, nest);
378
379 return 0;
380msg_full:
381 nla_nest_cancel(skb, nest);
382
383 return -EMSGSIZE;
384}
385
386int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
387{
388 int err;
389 void *hdr;
390 struct nlattr *attrs;
391 struct nlattr *prop;
392 struct tipc_net *tn = net_generic(net, tipc_net_id);
393 struct tipc_link *bcl = tn->bcl;
394
395 if (!bcl)
396 return 0;
397
398 tipc_bcast_lock(net);
399
400 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
401 NLM_F_MULTI, TIPC_NL_LINK_GET);
402 if (!hdr)
403 return -EMSGSIZE;
404
405 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
406 if (!attrs)
407 goto msg_full;
408
409 /* The broadcast link is always up */
410 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
411 goto attr_msg_full;
412
413 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
414 goto attr_msg_full;
415 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
416 goto attr_msg_full;
417 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
418 goto attr_msg_full;
419 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
420 goto attr_msg_full;
421
422 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
423 if (!prop)
424 goto attr_msg_full;
425 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
426 goto prop_msg_full;
427 nla_nest_end(msg->skb, prop);
428
429 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
430 if (err)
431 goto attr_msg_full;
432
433 tipc_bcast_unlock(net);
434 nla_nest_end(msg->skb, attrs);
435 genlmsg_end(msg->skb, hdr);
436
437 return 0;
438
439prop_msg_full:
440 nla_nest_cancel(msg->skb, prop);
441attr_msg_full:
442 nla_nest_cancel(msg->skb, attrs);
443msg_full:
444 tipc_bcast_unlock(net);
445 genlmsg_cancel(msg->skb, hdr);
446
447 return -EMSGSIZE;
448}
449
450int tipc_bclink_reset_stats(struct net *net) 335int tipc_bclink_reset_stats(struct net *net)
451{ 336{
452 struct tipc_net *tn = net_generic(net, tipc_net_id); 337 struct tipc_link *l = tipc_bc_sndlink(net);
453 struct tipc_link *bcl = tn->bcl;
454 338
455 if (!bcl) 339 if (!l)
456 return -ENOPROTOOPT; 340 return -ENOPROTOOPT;
457 341
458 tipc_bcast_lock(net); 342 tipc_bcast_lock(net);
459 memset(&bcl->stats, 0, sizeof(bcl->stats)); 343 tipc_link_reset_stats(l);
460 tipc_bcast_unlock(net); 344 tipc_bcast_unlock(net);
461 return 0; 345 return 0;
462} 346}
@@ -530,9 +414,7 @@ enomem:
530 414
531void tipc_bcast_reinit(struct net *net) 415void tipc_bcast_reinit(struct net *net)
532{ 416{
533 struct tipc_bc_base *b = tipc_bc_base(net); 417 tipc_link_reinit(tipc_bc_sndlink(net), tipc_own_addr(net));
534
535 msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
536} 418}
537 419
538void tipc_bcast_stop(struct net *net) 420void tipc_bcast_stop(struct net *net)
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 2855b9356a15..1944c6c00bb9 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -43,6 +43,7 @@ struct tipc_node;
43struct tipc_msg; 43struct tipc_msg;
44struct tipc_nl_msg; 44struct tipc_nl_msg;
45struct tipc_node_map; 45struct tipc_node_map;
46extern const char tipc_bclink_name[];
46 47
47int tipc_bcast_init(struct net *net); 48int tipc_bcast_init(struct net *net);
48void tipc_bcast_reinit(struct net *net); 49void tipc_bcast_reinit(struct net *net);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 648f2a67f314..802ffad3200d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -71,7 +71,7 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
71 [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } 71 [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
72}; 72};
73 73
74static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr); 74static void bearer_disable(struct net *net, struct tipc_bearer *b);
75 75
76/** 76/**
77 * tipc_media_find - locates specified media object by name 77 * tipc_media_find - locates specified media object by name
@@ -107,13 +107,13 @@ static struct tipc_media *media_find_id(u8 type)
107void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a) 107void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
108{ 108{
109 char addr_str[MAX_ADDR_STR]; 109 char addr_str[MAX_ADDR_STR];
110 struct tipc_media *m_ptr; 110 struct tipc_media *m;
111 int ret; 111 int ret;
112 112
113 m_ptr = media_find_id(a->media_id); 113 m = media_find_id(a->media_id);
114 114
115 if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) 115 if (m && !m->addr2str(a, addr_str, sizeof(addr_str)))
116 ret = scnprintf(buf, len, "%s(%s)", m_ptr->name, addr_str); 116 ret = scnprintf(buf, len, "%s(%s)", m->name, addr_str);
117 else { 117 else {
118 u32 i; 118 u32 i;
119 119
@@ -175,13 +175,13 @@ static int bearer_name_validate(const char *name,
175struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name) 175struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
176{ 176{
177 struct tipc_net *tn = net_generic(net, tipc_net_id); 177 struct tipc_net *tn = net_generic(net, tipc_net_id);
178 struct tipc_bearer *b_ptr; 178 struct tipc_bearer *b;
179 u32 i; 179 u32 i;
180 180
181 for (i = 0; i < MAX_BEARERS; i++) { 181 for (i = 0; i < MAX_BEARERS; i++) {
182 b_ptr = rtnl_dereference(tn->bearer_list[i]); 182 b = rtnl_dereference(tn->bearer_list[i]);
183 if (b_ptr && (!strcmp(b_ptr->name, name))) 183 if (b && (!strcmp(b->name, name)))
184 return b_ptr; 184 return b;
185 } 185 }
186 return NULL; 186 return NULL;
187} 187}
@@ -189,24 +189,24 @@ struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
189void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest) 189void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
190{ 190{
191 struct tipc_net *tn = net_generic(net, tipc_net_id); 191 struct tipc_net *tn = net_generic(net, tipc_net_id);
192 struct tipc_bearer *b_ptr; 192 struct tipc_bearer *b;
193 193
194 rcu_read_lock(); 194 rcu_read_lock();
195 b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); 195 b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
196 if (b_ptr) 196 if (b)
197 tipc_disc_add_dest(b_ptr->link_req); 197 tipc_disc_add_dest(b->link_req);
198 rcu_read_unlock(); 198 rcu_read_unlock();
199} 199}
200 200
201void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) 201void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
202{ 202{
203 struct tipc_net *tn = net_generic(net, tipc_net_id); 203 struct tipc_net *tn = net_generic(net, tipc_net_id);
204 struct tipc_bearer *b_ptr; 204 struct tipc_bearer *b;
205 205
206 rcu_read_lock(); 206 rcu_read_lock();
207 b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); 207 b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
208 if (b_ptr) 208 if (b)
209 tipc_disc_remove_dest(b_ptr->link_req); 209 tipc_disc_remove_dest(b->link_req);
210 rcu_read_unlock(); 210 rcu_read_unlock();
211} 211}
212 212
@@ -218,8 +218,8 @@ static int tipc_enable_bearer(struct net *net, const char *name,
218 struct nlattr *attr[]) 218 struct nlattr *attr[])
219{ 219{
220 struct tipc_net *tn = net_generic(net, tipc_net_id); 220 struct tipc_net *tn = net_generic(net, tipc_net_id);
221 struct tipc_bearer *b_ptr; 221 struct tipc_bearer *b;
222 struct tipc_media *m_ptr; 222 struct tipc_media *m;
223 struct tipc_bearer_names b_names; 223 struct tipc_bearer_names b_names;
224 char addr_string[16]; 224 char addr_string[16];
225 u32 bearer_id; 225 u32 bearer_id;
@@ -255,31 +255,31 @@ static int tipc_enable_bearer(struct net *net, const char *name,
255 return -EINVAL; 255 return -EINVAL;
256 } 256 }
257 257
258 m_ptr = tipc_media_find(b_names.media_name); 258 m = tipc_media_find(b_names.media_name);
259 if (!m_ptr) { 259 if (!m) {
260 pr_warn("Bearer <%s> rejected, media <%s> not registered\n", 260 pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
261 name, b_names.media_name); 261 name, b_names.media_name);
262 return -EINVAL; 262 return -EINVAL;
263 } 263 }
264 264
265 if (priority == TIPC_MEDIA_LINK_PRI) 265 if (priority == TIPC_MEDIA_LINK_PRI)
266 priority = m_ptr->priority; 266 priority = m->priority;
267 267
268restart: 268restart:
269 bearer_id = MAX_BEARERS; 269 bearer_id = MAX_BEARERS;
270 with_this_prio = 1; 270 with_this_prio = 1;
271 for (i = MAX_BEARERS; i-- != 0; ) { 271 for (i = MAX_BEARERS; i-- != 0; ) {
272 b_ptr = rtnl_dereference(tn->bearer_list[i]); 272 b = rtnl_dereference(tn->bearer_list[i]);
273 if (!b_ptr) { 273 if (!b) {
274 bearer_id = i; 274 bearer_id = i;
275 continue; 275 continue;
276 } 276 }
277 if (!strcmp(name, b_ptr->name)) { 277 if (!strcmp(name, b->name)) {
278 pr_warn("Bearer <%s> rejected, already enabled\n", 278 pr_warn("Bearer <%s> rejected, already enabled\n",
279 name); 279 name);
280 return -EINVAL; 280 return -EINVAL;
281 } 281 }
282 if ((b_ptr->priority == priority) && 282 if ((b->priority == priority) &&
283 (++with_this_prio > 2)) { 283 (++with_this_prio > 2)) {
284 if (priority-- == 0) { 284 if (priority-- == 0) {
285 pr_warn("Bearer <%s> rejected, duplicate priority\n", 285 pr_warn("Bearer <%s> rejected, duplicate priority\n",
@@ -297,35 +297,35 @@ restart:
297 return -EINVAL; 297 return -EINVAL;
298 } 298 }
299 299
300 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC); 300 b = kzalloc(sizeof(*b), GFP_ATOMIC);
301 if (!b_ptr) 301 if (!b)
302 return -ENOMEM; 302 return -ENOMEM;
303 303
304 strcpy(b_ptr->name, name); 304 strcpy(b->name, name);
305 b_ptr->media = m_ptr; 305 b->media = m;
306 res = m_ptr->enable_media(net, b_ptr, attr); 306 res = m->enable_media(net, b, attr);
307 if (res) { 307 if (res) {
308 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 308 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
309 name, -res); 309 name, -res);
310 return -EINVAL; 310 return -EINVAL;
311 } 311 }
312 312
313 b_ptr->identity = bearer_id; 313 b->identity = bearer_id;
314 b_ptr->tolerance = m_ptr->tolerance; 314 b->tolerance = m->tolerance;
315 b_ptr->window = m_ptr->window; 315 b->window = m->window;
316 b_ptr->domain = disc_domain; 316 b->domain = disc_domain;
317 b_ptr->net_plane = bearer_id + 'A'; 317 b->net_plane = bearer_id + 'A';
318 b_ptr->priority = priority; 318 b->priority = priority;
319 319
320 res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr); 320 res = tipc_disc_create(net, b, &b->bcast_addr);
321 if (res) { 321 if (res) {
322 bearer_disable(net, b_ptr); 322 bearer_disable(net, b);
323 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 323 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
324 name); 324 name);
325 return -EINVAL; 325 return -EINVAL;
326 } 326 }
327 327
328 rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr); 328 rcu_assign_pointer(tn->bearer_list[bearer_id], b);
329 329
330 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 330 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
331 name, 331 name,
@@ -336,11 +336,11 @@ restart:
336/** 336/**
337 * tipc_reset_bearer - Reset all links established over this bearer 337 * tipc_reset_bearer - Reset all links established over this bearer
338 */ 338 */
339static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr) 339static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
340{ 340{
341 pr_info("Resetting bearer <%s>\n", b_ptr->name); 341 pr_info("Resetting bearer <%s>\n", b->name);
342 tipc_node_delete_links(net, b_ptr->identity); 342 tipc_node_delete_links(net, b->identity);
343 tipc_disc_reset(net, b_ptr); 343 tipc_disc_reset(net, b);
344 return 0; 344 return 0;
345} 345}
346 346
@@ -349,26 +349,26 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
349 * 349 *
350 * Note: This routine assumes caller holds RTNL lock. 350 * Note: This routine assumes caller holds RTNL lock.
351 */ 351 */
352static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr) 352static void bearer_disable(struct net *net, struct tipc_bearer *b)
353{ 353{
354 struct tipc_net *tn = net_generic(net, tipc_net_id); 354 struct tipc_net *tn = net_generic(net, tipc_net_id);
355 u32 i; 355 u32 i;
356 356
357 pr_info("Disabling bearer <%s>\n", b_ptr->name); 357 pr_info("Disabling bearer <%s>\n", b->name);
358 b_ptr->media->disable_media(b_ptr); 358 b->media->disable_media(b);
359 359
360 tipc_node_delete_links(net, b_ptr->identity); 360 tipc_node_delete_links(net, b->identity);
361 RCU_INIT_POINTER(b_ptr->media_ptr, NULL); 361 RCU_INIT_POINTER(b->media_ptr, NULL);
362 if (b_ptr->link_req) 362 if (b->link_req)
363 tipc_disc_delete(b_ptr->link_req); 363 tipc_disc_delete(b->link_req);
364 364
365 for (i = 0; i < MAX_BEARERS; i++) { 365 for (i = 0; i < MAX_BEARERS; i++) {
366 if (b_ptr == rtnl_dereference(tn->bearer_list[i])) { 366 if (b == rtnl_dereference(tn->bearer_list[i])) {
367 RCU_INIT_POINTER(tn->bearer_list[i], NULL); 367 RCU_INIT_POINTER(tn->bearer_list[i], NULL);
368 break; 368 break;
369 } 369 }
370 } 370 }
371 kfree_rcu(b_ptr, rcu); 371 kfree_rcu(b, rcu);
372} 372}
373 373
374int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, 374int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
@@ -411,7 +411,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
411/** 411/**
412 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface 412 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
413 * @skb: the packet to be sent 413 * @skb: the packet to be sent
414 * @b_ptr: the bearer through which the packet is to be sent 414 * @b: the bearer through which the packet is to be sent
415 * @dest: peer destination address 415 * @dest: peer destination address
416 */ 416 */
417int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, 417int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
@@ -532,14 +532,14 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
532static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev, 532static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
533 struct packet_type *pt, struct net_device *orig_dev) 533 struct packet_type *pt, struct net_device *orig_dev)
534{ 534{
535 struct tipc_bearer *b_ptr; 535 struct tipc_bearer *b;
536 536
537 rcu_read_lock(); 537 rcu_read_lock();
538 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr); 538 b = rcu_dereference_rtnl(dev->tipc_ptr);
539 if (likely(b_ptr)) { 539 if (likely(b)) {
540 if (likely(buf->pkt_type <= PACKET_BROADCAST)) { 540 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
541 buf->next = NULL; 541 buf->next = NULL;
542 tipc_rcv(dev_net(dev), buf, b_ptr); 542 tipc_rcv(dev_net(dev), buf, b);
543 rcu_read_unlock(); 543 rcu_read_unlock();
544 return NET_RX_SUCCESS; 544 return NET_RX_SUCCESS;
545 } 545 }
@@ -564,13 +564,13 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
564{ 564{
565 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 565 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
566 struct net *net = dev_net(dev); 566 struct net *net = dev_net(dev);
567 struct tipc_bearer *b_ptr; 567 struct tipc_bearer *b;
568 568
569 b_ptr = rtnl_dereference(dev->tipc_ptr); 569 b = rtnl_dereference(dev->tipc_ptr);
570 if (!b_ptr) 570 if (!b)
571 return NOTIFY_DONE; 571 return NOTIFY_DONE;
572 572
573 b_ptr->mtu = dev->mtu; 573 b->mtu = dev->mtu;
574 574
575 switch (evt) { 575 switch (evt) {
576 case NETDEV_CHANGE: 576 case NETDEV_CHANGE:
@@ -578,16 +578,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
578 break; 578 break;
579 case NETDEV_GOING_DOWN: 579 case NETDEV_GOING_DOWN:
580 case NETDEV_CHANGEMTU: 580 case NETDEV_CHANGEMTU:
581 tipc_reset_bearer(net, b_ptr); 581 tipc_reset_bearer(net, b);
582 break; 582 break;
583 case NETDEV_CHANGEADDR: 583 case NETDEV_CHANGEADDR:
584 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr, 584 b->media->raw2addr(b, &b->addr,
585 (char *)dev->dev_addr); 585 (char *)dev->dev_addr);
586 tipc_reset_bearer(net, b_ptr); 586 tipc_reset_bearer(net, b);
587 break; 587 break;
588 case NETDEV_UNREGISTER: 588 case NETDEV_UNREGISTER:
589 case NETDEV_CHANGENAME: 589 case NETDEV_CHANGENAME:
590 bearer_disable(dev_net(dev), b_ptr); 590 bearer_disable(dev_net(dev), b);
591 break; 591 break;
592 } 592 }
593 return NOTIFY_OK; 593 return NOTIFY_OK;
@@ -623,13 +623,13 @@ void tipc_bearer_cleanup(void)
623void tipc_bearer_stop(struct net *net) 623void tipc_bearer_stop(struct net *net)
624{ 624{
625 struct tipc_net *tn = net_generic(net, tipc_net_id); 625 struct tipc_net *tn = net_generic(net, tipc_net_id);
626 struct tipc_bearer *b_ptr; 626 struct tipc_bearer *b;
627 u32 i; 627 u32 i;
628 628
629 for (i = 0; i < MAX_BEARERS; i++) { 629 for (i = 0; i < MAX_BEARERS; i++) {
630 b_ptr = rtnl_dereference(tn->bearer_list[i]); 630 b = rtnl_dereference(tn->bearer_list[i]);
631 if (b_ptr) { 631 if (b) {
632 bearer_disable(net, b_ptr); 632 bearer_disable(net, b);
633 tn->bearer_list[i] = NULL; 633 tn->bearer_list[i] = NULL;
634 } 634 }
635 } 635 }
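
The bearer.c changes above are almost entirely the mechanical b_ptr -> b rename, but they also show the full RCU lifecycle of a bearer in one place: publish with rcu_assign_pointer(), look up with rcu_dereference_rtnl() or rtnl_dereference(), unpublish with RCU_INIT_POINTER(), free with kfree_rcu(). A minimal sketch of that idiom, using hypothetical names rather than TIPC's, and assuming the update side holds RTNL as bearer_disable() does:

	/* Sketch only: the RCU publish/teardown idiom used by
	 * tipc_enable_bearer()/bearer_disable() above.
	 */
	struct item {
		int id;
		struct rcu_head rcu;
	};

	static struct item __rcu *registry[16];

	static void item_publish(int slot, struct item *it)
	{
		rcu_assign_pointer(registry[slot], it);	/* orders init before publish */
	}

	static void item_unpublish(int slot)
	{
		struct item *it = rtnl_dereference(registry[slot]);

		RCU_INIT_POINTER(registry[slot], NULL);	/* storing NULL needs no ordering */
		if (it)
			kfree_rcu(it, rcu);		/* freed after a grace period */
	}
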
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 552185bc4773..e31820516774 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -103,11 +103,11 @@ struct tipc_bearer;
103 */ 103 */
104struct tipc_media { 104struct tipc_media {
105 int (*send_msg)(struct net *net, struct sk_buff *buf, 105 int (*send_msg)(struct net *net, struct sk_buff *buf,
106 struct tipc_bearer *b_ptr, 106 struct tipc_bearer *b,
107 struct tipc_media_addr *dest); 107 struct tipc_media_addr *dest);
108 int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr, 108 int (*enable_media)(struct net *net, struct tipc_bearer *b,
109 struct nlattr *attr[]); 109 struct nlattr *attr[]);
110 void (*disable_media)(struct tipc_bearer *b_ptr); 110 void (*disable_media)(struct tipc_bearer *b);
111 int (*addr2str)(struct tipc_media_addr *addr, 111 int (*addr2str)(struct tipc_media_addr *addr,
112 char *strbuf, 112 char *strbuf,
113 int bufsz); 113 int bufsz);
@@ -176,7 +176,7 @@ struct tipc_bearer_names {
176 * TIPC routines available to supported media types 176 * TIPC routines available to supported media types
177 */ 177 */
178 178
179void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr); 179void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b);
180 180
181/* 181/*
182 * Routines made available to TIPC by supported media types 182 * Routines made available to TIPC by supported media types
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 18e95a8020cd..5504d63503df 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -118,6 +118,11 @@ static inline int tipc_netid(struct net *net)
118 return tipc_net(net)->net_id; 118 return tipc_net(net)->net_id;
119} 119}
120 120
121static inline struct list_head *tipc_nodes(struct net *net)
122{
123 return &tipc_net(net)->node_list;
124}
125
121static inline u16 mod(u16 x) 126static inline u16 mod(u16 x)
122{ 127{
123 return x & 0xffffu; 128 return x & 0xffffu;
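
The new tipc_nodes() helper exposes the per-namespace node list to code outside node.c; the node.c changes later in this diff use it from tipc_node_broadcast(). A hedged sketch of the intended usage pattern (do_something() is a placeholder, not a TIPC function):

	/* Sketch: RCU-safe walk of the cluster's node list via the new helper. */
	static void for_each_cluster_node(struct net *net)
	{
		struct tipc_node *n;

		rcu_read_lock();
		list_for_each_entry_rcu(n, tipc_nodes(net), list)
			do_something(n);	/* placeholder */
		rcu_read_unlock();
	}
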
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index afe8c47c4085..f1e738e80535 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -75,14 +75,14 @@ struct tipc_link_req {
75 * tipc_disc_init_msg - initialize a link setup message 75 * tipc_disc_init_msg - initialize a link setup message
76 * @net: the applicable net namespace 76 * @net: the applicable net namespace
77 * @type: message type (request or response) 77 * @type: message type (request or response)
78 * @b_ptr: ptr to bearer issuing message 78 * @b: ptr to bearer issuing message
79 */ 79 */
80static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type, 80static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
81 struct tipc_bearer *b_ptr) 81 struct tipc_bearer *b)
82{ 82{
83 struct tipc_net *tn = net_generic(net, tipc_net_id); 83 struct tipc_net *tn = net_generic(net, tipc_net_id);
84 struct tipc_msg *msg; 84 struct tipc_msg *msg;
85 u32 dest_domain = b_ptr->domain; 85 u32 dest_domain = b->domain;
86 86
87 msg = buf_msg(buf); 87 msg = buf_msg(buf);
88 tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type, 88 tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
@@ -92,16 +92,16 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
92 msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES); 92 msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
93 msg_set_dest_domain(msg, dest_domain); 93 msg_set_dest_domain(msg, dest_domain);
94 msg_set_bc_netid(msg, tn->net_id); 94 msg_set_bc_netid(msg, tn->net_id);
95 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); 95 b->media->addr2msg(msg_media_addr(msg), &b->addr);
96} 96}
97 97
98/** 98/**
99 * disc_dupl_alert - issue node address duplication alert 99 * disc_dupl_alert - issue node address duplication alert
100 * @b_ptr: pointer to bearer detecting duplication 100 * @b: pointer to bearer detecting duplication
101 * @node_addr: duplicated node address 101 * @node_addr: duplicated node address
102 * @media_addr: media address advertised by duplicated node 102 * @media_addr: media address advertised by duplicated node
103 */ 103 */
104static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, 104static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
105 struct tipc_media_addr *media_addr) 105 struct tipc_media_addr *media_addr)
106{ 106{
107 char node_addr_str[16]; 107 char node_addr_str[16];
@@ -111,7 +111,7 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
111 tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str), 111 tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
112 media_addr); 112 media_addr);
113 pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str, 113 pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
114 media_addr_str, b_ptr->name); 114 media_addr_str, b->name);
115} 115}
116 116
117/** 117/**
@@ -261,13 +261,13 @@ exit:
261/** 261/**
262 * tipc_disc_create - create object to send periodic link setup requests 262 * tipc_disc_create - create object to send periodic link setup requests
263 * @net: the applicable net namespace 263 * @net: the applicable net namespace
264 * @b_ptr: ptr to bearer issuing requests 264 * @b: ptr to bearer issuing requests
265 * @dest: destination address for request messages 265 * @dest: destination address for request messages
266 * @dest_domain: network domain to which links can be established 266 * @dest_domain: network domain to which links can be established
267 * 267 *
268 * Returns 0 if successful, otherwise -errno. 268 * Returns 0 if successful, otherwise -errno.
269 */ 269 */
270int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr, 270int tipc_disc_create(struct net *net, struct tipc_bearer *b,
271 struct tipc_media_addr *dest) 271 struct tipc_media_addr *dest)
272{ 272{
273 struct tipc_link_req *req; 273 struct tipc_link_req *req;
@@ -282,17 +282,17 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
282 return -ENOMEM; 282 return -ENOMEM;
283 } 283 }
284 284
285 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); 285 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b);
286 memcpy(&req->dest, dest, sizeof(*dest)); 286 memcpy(&req->dest, dest, sizeof(*dest));
287 req->net = net; 287 req->net = net;
288 req->bearer_id = b_ptr->identity; 288 req->bearer_id = b->identity;
289 req->domain = b_ptr->domain; 289 req->domain = b->domain;
290 req->num_nodes = 0; 290 req->num_nodes = 0;
291 req->timer_intv = TIPC_LINK_REQ_INIT; 291 req->timer_intv = TIPC_LINK_REQ_INIT;
292 spin_lock_init(&req->lock); 292 spin_lock_init(&req->lock);
293 setup_timer(&req->timer, disc_timeout, (unsigned long)req); 293 setup_timer(&req->timer, disc_timeout, (unsigned long)req);
294 mod_timer(&req->timer, jiffies + req->timer_intv); 294 mod_timer(&req->timer, jiffies + req->timer_intv);
295 b_ptr->link_req = req; 295 b->link_req = req;
296 skb = skb_clone(req->buf, GFP_ATOMIC); 296 skb = skb_clone(req->buf, GFP_ATOMIC);
297 if (skb) 297 if (skb)
298 tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest); 298 tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
@@ -313,19 +313,19 @@ void tipc_disc_delete(struct tipc_link_req *req)
313/** 313/**
314 * tipc_disc_reset - reset object to send periodic link setup requests 314 * tipc_disc_reset - reset object to send periodic link setup requests
315 * @net: the applicable net namespace 315 * @net: the applicable net namespace
316 * @b_ptr: ptr to bearer issuing requests 316 * @b: ptr to bearer issuing requests
317 * @dest_domain: network domain to which links can be established 317 * @dest_domain: network domain to which links can be established
318 */ 318 */
319void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr) 319void tipc_disc_reset(struct net *net, struct tipc_bearer *b)
320{ 320{
321 struct tipc_link_req *req = b_ptr->link_req; 321 struct tipc_link_req *req = b->link_req;
322 struct sk_buff *skb; 322 struct sk_buff *skb;
323 323
324 spin_lock_bh(&req->lock); 324 spin_lock_bh(&req->lock);
325 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); 325 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b);
326 req->net = net; 326 req->net = net;
327 req->bearer_id = b_ptr->identity; 327 req->bearer_id = b->identity;
328 req->domain = b_ptr->domain; 328 req->domain = b->domain;
329 req->num_nodes = 0; 329 req->num_nodes = 0;
330 req->timer_intv = TIPC_LINK_REQ_INIT; 330 req->timer_intv = TIPC_LINK_REQ_INIT;
331 mod_timer(&req->timer, jiffies + req->timer_intv); 331 mod_timer(&req->timer, jiffies + req->timer_intv);
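
Both tipc_disc_create() and tipc_disc_reset() rebuild the request buffer and re-arm the discovery timer through the setup_timer()/mod_timer() API of this kernel generation. A minimal sketch of that periodic-request idiom, with a hypothetical callback name (the real callback, disc_timeout(), is not shown in these hunks):

	/* Sketch of the re-arming timer idiom used by the link discoverer. */
	static void my_disc_timeout(unsigned long data)
	{
		struct tipc_link_req *req = (struct tipc_link_req *)data;

		/* ... clone req->buf and transmit it, adjust req->timer_intv ... */
		mod_timer(&req->timer, jiffies + req->timer_intv);	/* re-arm */
	}

	/* At create time, as in tipc_disc_create() above:
	 *	setup_timer(&req->timer, my_disc_timeout, (unsigned long)req);
	 *	mod_timer(&req->timer, jiffies + req->timer_intv);
	 */
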
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 9efbdbde2b08..b11afe71dfc1 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -45,28 +45,156 @@
45 45
46#include <linux/pkt_sched.h> 46#include <linux/pkt_sched.h>
47 47
48struct tipc_stats {
49 u32 sent_info; /* used in counting # sent packets */
50 u32 recv_info; /* used in counting # recv'd packets */
51 u32 sent_states;
52 u32 recv_states;
53 u32 sent_probes;
54 u32 recv_probes;
55 u32 sent_nacks;
56 u32 recv_nacks;
57 u32 sent_acks;
58 u32 sent_bundled;
59 u32 sent_bundles;
60 u32 recv_bundled;
61 u32 recv_bundles;
62 u32 retransmitted;
63 u32 sent_fragmented;
64 u32 sent_fragments;
65 u32 recv_fragmented;
66 u32 recv_fragments;
67 u32 link_congs; /* # port sends blocked by congestion */
68 u32 deferred_recv;
69 u32 duplicates;
70 u32 max_queue_sz; /* send queue size high water mark */
71 u32 accu_queue_sz; /* used for send queue size profiling */
72 u32 queue_sz_counts; /* used for send queue size profiling */
73 u32 msg_length_counts; /* used for message length profiling */
74 u32 msg_lengths_total; /* used for message length profiling */
75 u32 msg_length_profile[7]; /* used for msg. length profiling */
76};
77
78/**
79 * struct tipc_link - TIPC link data structure
80 * @addr: network address of link's peer node
81 * @name: link name character string
82 * @media_addr: media address to use when sending messages over link
83 * @timer: link timer
84 * @net: pointer to namespace struct
85 * @refcnt: reference counter for permanent references (owner node & timer)
86 * @peer_session: link session # being used by peer end of link
87 * @peer_bearer_id: bearer id used by link's peer endpoint
88 * @bearer_id: local bearer id used by link
89 * @tolerance: minimum link continuity loss needed to reset link [in ms]
90 * @keepalive_intv: link keepalive timer interval
91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
92 * @state: current state of link FSM
93 * @peer_caps: bitmap describing capabilities of peer node
94 * @silent_intv_cnt: # of timer intervals without any reception from peer
95 * @proto_msg: template for control messages generated by link
96 * @pmsg: convenience pointer to "proto_msg" field
97 * @priority: current link priority
98 * @net_plane: current link network plane ('A' through 'H')
99 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
100 * @exp_msg_count: # of tunnelled messages expected during link changeover
101 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
102 * @mtu: current maximum packet size for this link
103 * @advertised_mtu: advertised own mtu when link is being established
104 * @transmitq: queue for sent, non-acked messages
105 * @backlogq: queue for messages waiting to be sent
106 * @snt_nxt: next sequence number to use for outbound messages
107 * @last_retransmitted: sequence number of most recently retransmitted message
108 * @stale_count: # of identical retransmit requests made by peer
 109 * @ackers: # of peers that need to ack each packet before it can be released
 110 * @acked: seq # of last packet acked by a certain peer. Used for broadcast.
 111 * @rcv_nxt: next sequence number to expect for inbound messages
 112 * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
113 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
114 * @inputq: buffer queue for messages to be delivered upwards
115 * @namedq: buffer queue for name table messages to be delivered upwards
116 * @next_out: ptr to first unsent outbound message in queue
117 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
118 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
119 * @reasm_buf: head of partially reassembled inbound message fragments
120 * @bc_rcvr: marks that this is a broadcast receiver link
121 * @stats: collects statistics regarding link activity
122 */
123struct tipc_link {
124 u32 addr;
125 char name[TIPC_MAX_LINK_NAME];
126 struct tipc_media_addr *media_addr;
127 struct net *net;
128
129 /* Management and link supervision data */
130 u32 peer_session;
131 u32 peer_bearer_id;
132 u32 bearer_id;
133 u32 tolerance;
134 unsigned long keepalive_intv;
135 u32 abort_limit;
136 u32 state;
137 u16 peer_caps;
138 bool active;
139 u32 silent_intv_cnt;
140 struct {
141 unchar hdr[INT_H_SIZE];
142 unchar body[TIPC_MAX_IF_NAME];
143 } proto_msg;
144 struct tipc_msg *pmsg;
145 u32 priority;
146 char net_plane;
147
148 /* Failover/synch */
149 u16 drop_point;
150 struct sk_buff *failover_reasm_skb;
151
152 /* Max packet negotiation */
153 u16 mtu;
154 u16 advertised_mtu;
155
156 /* Sending */
157 struct sk_buff_head transmq;
158 struct sk_buff_head backlogq;
159 struct {
160 u16 len;
161 u16 limit;
162 } backlog[5];
163 u16 snd_nxt;
164 u16 last_retransm;
165 u16 window;
166 u32 stale_count;
167
168 /* Reception */
169 u16 rcv_nxt;
170 u32 rcv_unacked;
171 struct sk_buff_head deferdq;
172 struct sk_buff_head *inputq;
173 struct sk_buff_head *namedq;
174
175 /* Congestion handling */
176 struct sk_buff_head wakeupq;
177
178 /* Fragmentation/reassembly */
179 struct sk_buff *reasm_buf;
180
181 /* Broadcast */
182 u16 ackers;
183 u16 acked;
184 struct tipc_link *bc_rcvlink;
185 struct tipc_link *bc_sndlink;
186 int nack_state;
187 bool bc_peer_is_up;
188
189 /* Statistics */
190 struct tipc_stats stats;
191};
192
48/* 193/*
49 * Error message prefixes 194 * Error message prefixes
50 */ 195 */
51static const char *link_co_err = "Link tunneling error, "; 196static const char *link_co_err = "Link tunneling error, ";
52static const char *link_rst_msg = "Resetting link "; 197static const char *link_rst_msg = "Resetting link ";
53static const char tipc_bclink_name[] = "broadcast-link";
54
55static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
56 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
57 [TIPC_NLA_LINK_NAME] = {
58 .type = NLA_STRING,
59 .len = TIPC_MAX_LINK_NAME
60 },
61 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
62 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
63 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
64 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
65 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
66 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
67 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
68 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
69};
70 198
 71/* Properties valid for media, bearer and link */ 199/* Properties valid for media, bearer and link */
72static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { 200static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
@@ -117,8 +245,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
117static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, 245static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
118 u16 rcvgap, int tolerance, int priority, 246 u16 rcvgap, int tolerance, int priority,
119 struct sk_buff_head *xmitq); 247 struct sk_buff_head *xmitq);
120static void link_reset_statistics(struct tipc_link *l_ptr); 248static void link_print(struct tipc_link *l, const char *str);
121static void link_print(struct tipc_link *l_ptr, const char *str);
122static void tipc_link_build_nack_msg(struct tipc_link *l, 249static void tipc_link_build_nack_msg(struct tipc_link *l,
123 struct sk_buff_head *xmitq); 250 struct sk_buff_head *xmitq);
124static void tipc_link_build_bc_init_msg(struct tipc_link *l, 251static void tipc_link_build_bc_init_msg(struct tipc_link *l,
@@ -183,6 +310,36 @@ void tipc_link_set_active(struct tipc_link *l, bool active)
183 l->active = active; 310 l->active = active;
184} 311}
185 312
313u32 tipc_link_id(struct tipc_link *l)
314{
315 return l->peer_bearer_id << 16 | l->bearer_id;
316}
317
318int tipc_link_window(struct tipc_link *l)
319{
320 return l->window;
321}
322
323int tipc_link_prio(struct tipc_link *l)
324{
325 return l->priority;
326}
327
328unsigned long tipc_link_tolerance(struct tipc_link *l)
329{
330 return l->tolerance;
331}
332
333struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
334{
335 return l->inputq;
336}
337
338char tipc_link_plane(struct tipc_link *l)
339{
340 return l->net_plane;
341}
342
186void tipc_link_add_bc_peer(struct tipc_link *snd_l, 343void tipc_link_add_bc_peer(struct tipc_link *snd_l,
187 struct tipc_link *uc_l, 344 struct tipc_link *uc_l,
188 struct sk_buff_head *xmitq) 345 struct sk_buff_head *xmitq)
@@ -225,11 +382,31 @@ int tipc_link_mtu(struct tipc_link *l)
225 return l->mtu; 382 return l->mtu;
226} 383}
227 384
385u16 tipc_link_rcv_nxt(struct tipc_link *l)
386{
387 return l->rcv_nxt;
388}
389
390u16 tipc_link_acked(struct tipc_link *l)
391{
392 return l->acked;
393}
394
395char *tipc_link_name(struct tipc_link *l)
396{
397 return l->name;
398}
399
228static u32 link_own_addr(struct tipc_link *l) 400static u32 link_own_addr(struct tipc_link *l)
229{ 401{
230 return msg_prevnode(l->pmsg); 402 return msg_prevnode(l->pmsg);
231} 403}
232 404
405void tipc_link_reinit(struct tipc_link *l, u32 addr)
406{
407 msg_set_prevnode(l->pmsg, addr);
408}
409
233/** 410/**
234 * tipc_link_create - create a new link 411 * tipc_link_create - create a new link
235 * @n: pointer to associated node 412 * @n: pointer to associated node
@@ -692,7 +869,7 @@ void tipc_link_reset(struct tipc_link *l)
692 l->stats.recv_info = 0; 869 l->stats.recv_info = 0;
693 l->stale_count = 0; 870 l->stale_count = 0;
694 l->bc_peer_is_up = false; 871 l->bc_peer_is_up = false;
695 link_reset_statistics(l); 872 tipc_link_reset_stats(l);
696} 873}
697 874
698/** 875/**
@@ -1085,8 +1262,9 @@ drop:
1085/* 1262/*
1086 * Send protocol message to the other endpoint. 1263 * Send protocol message to the other endpoint.
1087 */ 1264 */
1088void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg, 1265static void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ,
1089 u32 gap, u32 tolerance, u32 priority) 1266 int probe_msg, u32 gap, u32 tolerance,
1267 u32 priority)
1090{ 1268{
1091 struct sk_buff *skb = NULL; 1269 struct sk_buff *skb = NULL;
1092 struct sk_buff_head xmitq; 1270 struct sk_buff_head xmitq;
@@ -1260,6 +1438,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1260 /* fall thru' */ 1438 /* fall thru' */
1261 1439
1262 case ACTIVATE_MSG: 1440 case ACTIVATE_MSG:
1441 skb_linearize(skb);
1442 hdr = buf_msg(skb);
1263 1443
1264 /* Complete own link name with peer's interface name */ 1444 /* Complete own link name with peer's interface name */
1265 if_name = strrchr(l->name, ':') + 1; 1445 if_name = strrchr(l->name, ':') + 1;
@@ -1525,53 +1705,17 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1525 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; 1705 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
1526} 1706}
1527 1707
1528/* tipc_link_find_owner - locate owner node of link by link's name
1529 * @net: the applicable net namespace
1530 * @name: pointer to link name string
1531 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1532 *
1533 * Returns pointer to node owning the link, or 0 if no matching link is found.
1534 */
1535static struct tipc_node *tipc_link_find_owner(struct net *net,
1536 const char *link_name,
1537 unsigned int *bearer_id)
1538{
1539 struct tipc_net *tn = net_generic(net, tipc_net_id);
1540 struct tipc_link *l_ptr;
1541 struct tipc_node *n_ptr;
1542 struct tipc_node *found_node = NULL;
1543 int i;
1544
1545 *bearer_id = 0;
1546 rcu_read_lock();
1547 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1548 tipc_node_lock(n_ptr);
1549 for (i = 0; i < MAX_BEARERS; i++) {
1550 l_ptr = n_ptr->links[i].link;
1551 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1552 *bearer_id = i;
1553 found_node = n_ptr;
1554 break;
1555 }
1556 }
1557 tipc_node_unlock(n_ptr);
1558 if (found_node)
1559 break;
1560 }
1561 rcu_read_unlock();
1562
1563 return found_node;
1564}
1565
1566/** 1708/**
1567 * link_reset_statistics - reset link statistics 1709 * link_reset_stats - reset link statistics
1568 * @l_ptr: pointer to link 1710 * @l: pointer to link
1569 */ 1711 */
1570static void link_reset_statistics(struct tipc_link *l_ptr) 1712void tipc_link_reset_stats(struct tipc_link *l)
1571{ 1713{
1572 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 1714 memset(&l->stats, 0, sizeof(l->stats));
1573 l_ptr->stats.sent_info = l_ptr->snd_nxt; 1715 if (!link_is_bc_sndlink(l)) {
1574 l_ptr->stats.recv_info = l_ptr->rcv_nxt; 1716 l->stats.sent_info = l->snd_nxt;
1717 l->stats.recv_info = l->rcv_nxt;
1718 }
1575} 1719}
1576 1720
1577static void link_print(struct tipc_link *l, const char *str) 1721static void link_print(struct tipc_link *l, const char *str)
@@ -1624,84 +1768,6 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1624 return 0; 1768 return 0;
1625} 1769}
1626 1770
1627int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1628{
1629 int err;
1630 int res = 0;
1631 int bearer_id;
1632 char *name;
1633 struct tipc_link *link;
1634 struct tipc_node *node;
1635 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1636 struct net *net = sock_net(skb->sk);
1637
1638 if (!info->attrs[TIPC_NLA_LINK])
1639 return -EINVAL;
1640
1641 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1642 info->attrs[TIPC_NLA_LINK],
1643 tipc_nl_link_policy);
1644 if (err)
1645 return err;
1646
1647 if (!attrs[TIPC_NLA_LINK_NAME])
1648 return -EINVAL;
1649
1650 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1651
1652 if (strcmp(name, tipc_bclink_name) == 0)
1653 return tipc_nl_bc_link_set(net, attrs);
1654
1655 node = tipc_link_find_owner(net, name, &bearer_id);
1656 if (!node)
1657 return -EINVAL;
1658
1659 tipc_node_lock(node);
1660
1661 link = node->links[bearer_id].link;
1662 if (!link) {
1663 res = -EINVAL;
1664 goto out;
1665 }
1666
1667 if (attrs[TIPC_NLA_LINK_PROP]) {
1668 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1669
1670 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1671 props);
1672 if (err) {
1673 res = err;
1674 goto out;
1675 }
1676
1677 if (props[TIPC_NLA_PROP_TOL]) {
1678 u32 tol;
1679
1680 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1681 link->tolerance = tol;
1682 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
1683 }
1684 if (props[TIPC_NLA_PROP_PRIO]) {
1685 u32 prio;
1686
1687 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1688 link->priority = prio;
1689 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
1690 }
1691 if (props[TIPC_NLA_PROP_WIN]) {
1692 u32 win;
1693
1694 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1695 tipc_link_set_queue_limits(link, win);
1696 }
1697 }
1698
1699out:
1700 tipc_node_unlock(node);
1701
1702 return res;
1703}
1704
1705static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) 1771static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1706{ 1772{
1707 int i; 1773 int i;
@@ -1768,8 +1834,8 @@ msg_full:
1768} 1834}
1769 1835
1770/* Caller should hold appropriate locks to protect the link */ 1836/* Caller should hold appropriate locks to protect the link */
1771static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, 1837int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1772 struct tipc_link *link, int nlflags) 1838 struct tipc_link *link, int nlflags)
1773{ 1839{
1774 int err; 1840 int err;
1775 void *hdr; 1841 void *hdr;
@@ -1838,198 +1904,134 @@ msg_full:
1838 return -EMSGSIZE; 1904 return -EMSGSIZE;
1839} 1905}
1840 1906
1841/* Caller should hold node lock */ 1907static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1842static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, 1908 struct tipc_stats *stats)
1843 struct tipc_node *node, u32 *prev_link)
1844{ 1909{
1845 u32 i; 1910 int i;
1846 int err; 1911 struct nlattr *nest;
1847
1848 for (i = *prev_link; i < MAX_BEARERS; i++) {
1849 *prev_link = i;
1850
1851 if (!node->links[i].link)
1852 continue;
1853 1912
1854 err = __tipc_nl_add_link(net, msg, 1913 struct nla_map {
1855 node->links[i].link, NLM_F_MULTI); 1914 __u32 key;
1856 if (err) 1915 __u32 val;
1857 return err; 1916 };
1858 }
1859 *prev_link = 0;
1860 1917
1861 return 0; 1918 struct nla_map map[] = {
1862} 1919 {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
1920 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
1921 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
1922 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
1923 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
1924 {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
1925 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
1926 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
1927 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
1928 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
1929 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
1930 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
1931 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
1932 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
1933 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
1934 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
1935 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
1936 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
1937 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
1938 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
1939 };
1863 1940
1864int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) 1941 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1865{ 1942 if (!nest)
1866 struct net *net = sock_net(skb->sk); 1943 return -EMSGSIZE;
1867 struct tipc_net *tn = net_generic(net, tipc_net_id);
1868 struct tipc_node *node;
1869 struct tipc_nl_msg msg;
1870 u32 prev_node = cb->args[0];
1871 u32 prev_link = cb->args[1];
1872 int done = cb->args[2];
1873 int err;
1874 1944
1875 if (done) 1945 for (i = 0; i < ARRAY_SIZE(map); i++)
1876 return 0; 1946 if (nla_put_u32(skb, map[i].key, map[i].val))
1947 goto msg_full;
1877 1948
1878 msg.skb = skb; 1949 nla_nest_end(skb, nest);
1879 msg.portid = NETLINK_CB(cb->skb).portid;
1880 msg.seq = cb->nlh->nlmsg_seq;
1881
1882 rcu_read_lock();
1883 if (prev_node) {
1884 node = tipc_node_find(net, prev_node);
1885 if (!node) {
1886 /* We never set seq or call nl_dump_check_consistent()
1887 * this means that setting prev_seq here will cause the
1888 * consistence check to fail in the netlink callback
1889 * handler. Resulting in the last NLMSG_DONE message
1890 * having the NLM_F_DUMP_INTR flag set.
1891 */
1892 cb->prev_seq = 1;
1893 goto out;
1894 }
1895 tipc_node_put(node);
1896
1897 list_for_each_entry_continue_rcu(node, &tn->node_list,
1898 list) {
1899 tipc_node_lock(node);
1900 err = __tipc_nl_add_node_links(net, &msg, node,
1901 &prev_link);
1902 tipc_node_unlock(node);
1903 if (err)
1904 goto out;
1905
1906 prev_node = node->addr;
1907 }
1908 } else {
1909 err = tipc_nl_add_bc_link(net, &msg);
1910 if (err)
1911 goto out;
1912
1913 list_for_each_entry_rcu(node, &tn->node_list, list) {
1914 tipc_node_lock(node);
1915 err = __tipc_nl_add_node_links(net, &msg, node,
1916 &prev_link);
1917 tipc_node_unlock(node);
1918 if (err)
1919 goto out;
1920
1921 prev_node = node->addr;
1922 }
1923 }
1924 done = 1;
1925out:
1926 rcu_read_unlock();
1927 1950
1928 cb->args[0] = prev_node; 1951 return 0;
1929 cb->args[1] = prev_link; 1952msg_full:
1930 cb->args[2] = done; 1953 nla_nest_cancel(skb, nest);
1931 1954
1932 return skb->len; 1955 return -EMSGSIZE;
1933} 1956}
1934 1957
1935int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) 1958int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1936{ 1959{
1937 struct net *net = genl_info_net(info);
1938 struct tipc_nl_msg msg;
1939 char *name;
1940 int err; 1960 int err;
1961 void *hdr;
1962 struct nlattr *attrs;
1963 struct nlattr *prop;
1964 struct tipc_net *tn = net_generic(net, tipc_net_id);
1965 struct tipc_link *bcl = tn->bcl;
1941 1966
1942 msg.portid = info->snd_portid; 1967 if (!bcl)
1943 msg.seq = info->snd_seq; 1968 return 0;
1944
1945 if (!info->attrs[TIPC_NLA_LINK_NAME])
1946 return -EINVAL;
1947 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1948 1969
1949 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1970 tipc_bcast_lock(net);
1950 if (!msg.skb)
1951 return -ENOMEM;
1952 1971
1953 if (strcmp(name, tipc_bclink_name) == 0) { 1972 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1954 err = tipc_nl_add_bc_link(net, &msg); 1973 NLM_F_MULTI, TIPC_NL_LINK_GET);
1955 if (err) { 1974 if (!hdr)
1956 nlmsg_free(msg.skb); 1975 return -EMSGSIZE;
1957 return err;
1958 }
1959 } else {
1960 int bearer_id;
1961 struct tipc_node *node;
1962 struct tipc_link *link;
1963 1976
1964 node = tipc_link_find_owner(net, name, &bearer_id); 1977 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1965 if (!node) 1978 if (!attrs)
1966 return -EINVAL; 1979 goto msg_full;
1967 1980
1968 tipc_node_lock(node); 1981 /* The broadcast link is always up */
1969 link = node->links[bearer_id].link; 1982 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1970 if (!link) { 1983 goto attr_msg_full;
1971 tipc_node_unlock(node);
1972 nlmsg_free(msg.skb);
1973 return -EINVAL;
1974 }
1975 1984
1976 err = __tipc_nl_add_link(net, &msg, link, 0); 1985 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
1977 tipc_node_unlock(node); 1986 goto attr_msg_full;
1978 if (err) { 1987 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
1979 nlmsg_free(msg.skb); 1988 goto attr_msg_full;
1980 return err; 1989 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
1981 } 1990 goto attr_msg_full;
1982 } 1991 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
1992 goto attr_msg_full;
1983 1993
1984 return genlmsg_reply(msg.skb, info); 1994 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1985} 1995 if (!prop)
1996 goto attr_msg_full;
1997 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
1998 goto prop_msg_full;
1999 nla_nest_end(msg->skb, prop);
1986 2000
1987int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) 2001 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
1988{
1989 int err;
1990 char *link_name;
1991 unsigned int bearer_id;
1992 struct tipc_link *link;
1993 struct tipc_node *node;
1994 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1995 struct net *net = sock_net(skb->sk);
1996
1997 if (!info->attrs[TIPC_NLA_LINK])
1998 return -EINVAL;
1999
2000 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2001 info->attrs[TIPC_NLA_LINK],
2002 tipc_nl_link_policy);
2003 if (err) 2002 if (err)
2004 return err; 2003 goto attr_msg_full;
2005
2006 if (!attrs[TIPC_NLA_LINK_NAME])
2007 return -EINVAL;
2008
2009 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2010 2004
2011 if (strcmp(link_name, tipc_bclink_name) == 0) { 2005 tipc_bcast_unlock(net);
2012 err = tipc_bclink_reset_stats(net); 2006 nla_nest_end(msg->skb, attrs);
2013 if (err) 2007 genlmsg_end(msg->skb, hdr);
2014 return err;
2015 return 0;
2016 }
2017 2008
2018 node = tipc_link_find_owner(net, link_name, &bearer_id); 2009 return 0;
2019 if (!node)
2020 return -EINVAL;
2021 2010
2022 tipc_node_lock(node); 2011prop_msg_full:
2012 nla_nest_cancel(msg->skb, prop);
2013attr_msg_full:
2014 nla_nest_cancel(msg->skb, attrs);
2015msg_full:
2016 tipc_bcast_unlock(net);
2017 genlmsg_cancel(msg->skb, hdr);
2023 2018
2024 link = node->links[bearer_id].link; 2019 return -EMSGSIZE;
2025 if (!link) { 2020}
2026 tipc_node_unlock(node);
2027 return -EINVAL;
2028 }
2029 2021
2030 link_reset_statistics(link); 2022void tipc_link_set_tolerance(struct tipc_link *l, u32 tol)
2023{
2024 l->tolerance = tol;
2025 tipc_link_proto_xmit(l, STATE_MSG, 0, 0, tol, 0);
2026}
2031 2027
2032 tipc_node_unlock(node); 2028void tipc_link_set_prio(struct tipc_link *l, u32 prio)
2029{
2030 l->priority = prio;
2031 tipc_link_proto_xmit(l, STATE_MSG, 0, 0, 0, prio);
2032}
2033 2033
2034 return 0; 2034void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2035{
2036 l->abort_limit = limit;
2035} 2037}
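
With the netlink handlers relocated to node.c, link.c now ends with small setters that update the local value and, for tolerance and priority, immediately advertise the change to the peer in a STATE_MSG. A sketch of how the relocated handler is expected to apply parsed properties while holding the node lock (the function name here is illustrative; the real caller is tipc_nl_node_set_link()):

	/* Sketch only: applying parsed link properties via the new setters. */
	static void apply_link_props(struct tipc_link *l, u32 tol, u32 prio, u32 win)
	{
		tipc_link_set_tolerance(l, tol);	/* also sent to peer in a STATE_MSG */
		tipc_link_set_prio(l, prio);		/* likewise */
		tipc_link_set_queue_limits(l, win);	/* local send window only */
	}
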
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 66d859b66c84..b2ae0f4276af 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -45,10 +45,6 @@
45*/ 45*/
46#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 46#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
47 47
48/* Out-of-range value for link sequence numbers
49 */
50#define INVALID_LINK_SEQ 0x10000
51
52/* Link FSM events: 48/* Link FSM events:
53 */ 49 */
54enum { 50enum {
@@ -75,151 +71,6 @@ enum {
75 */ 71 */
76#define MAX_PKT_DEFAULT 1500 72#define MAX_PKT_DEFAULT 1500
77 73
78struct tipc_stats {
79 u32 sent_info; /* used in counting # sent packets */
80 u32 recv_info; /* used in counting # recv'd packets */
81 u32 sent_states;
82 u32 recv_states;
83 u32 sent_probes;
84 u32 recv_probes;
85 u32 sent_nacks;
86 u32 recv_nacks;
87 u32 sent_acks;
88 u32 sent_bundled;
89 u32 sent_bundles;
90 u32 recv_bundled;
91 u32 recv_bundles;
92 u32 retransmitted;
93 u32 sent_fragmented;
94 u32 sent_fragments;
95 u32 recv_fragmented;
96 u32 recv_fragments;
97 u32 link_congs; /* # port sends blocked by congestion */
98 u32 deferred_recv;
99 u32 duplicates;
100 u32 max_queue_sz; /* send queue size high water mark */
101 u32 accu_queue_sz; /* used for send queue size profiling */
102 u32 queue_sz_counts; /* used for send queue size profiling */
103 u32 msg_length_counts; /* used for message length profiling */
104 u32 msg_lengths_total; /* used for message length profiling */
105 u32 msg_length_profile[7]; /* used for msg. length profiling */
106};
107
108/**
109 * struct tipc_link - TIPC link data structure
110 * @addr: network address of link's peer node
111 * @name: link name character string
112 * @media_addr: media address to use when sending messages over link
113 * @timer: link timer
114 * @net: pointer to namespace struct
115 * @refcnt: reference counter for permanent references (owner node & timer)
116 * @peer_session: link session # being used by peer end of link
117 * @peer_bearer_id: bearer id used by link's peer endpoint
118 * @bearer_id: local bearer id used by link
119 * @tolerance: minimum link continuity loss needed to reset link [in ms]
120 * @keepalive_intv: link keepalive timer interval
121 * @abort_limit: # of unacknowledged continuity probes needed to reset link
122 * @state: current state of link FSM
123 * @peer_caps: bitmap describing capabilities of peer node
124 * @silent_intv_cnt: # of timer intervals without any reception from peer
125 * @proto_msg: template for control messages generated by link
126 * @pmsg: convenience pointer to "proto_msg" field
127 * @priority: current link priority
128 * @net_plane: current link network plane ('A' through 'H')
129 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
130 * @exp_msg_count: # of tunnelled messages expected during link changeover
131 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
132 * @mtu: current maximum packet size for this link
133 * @advertised_mtu: advertised own mtu when link is being established
134 * @transmitq: queue for sent, non-acked messages
135 * @backlogq: queue for messages waiting to be sent
136 * @snt_nxt: next sequence number to use for outbound messages
137 * @last_retransmitted: sequence number of most recently retransmitted message
138 * @stale_count: # of identical retransmit requests made by peer
139 * @ackers: # of peers that needs to ack each packet before it can be released
140 * @acked: # last packet acked by a certain peer. Used for broadcast.
141 * @rcv_nxt: next sequence number to expect for inbound messages
142 * @deferred_queue: deferred queue saved OOS b'cast message received from node
143 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
144 * @inputq: buffer queue for messages to be delivered upwards
145 * @namedq: buffer queue for name table messages to be delivered upwards
146 * @next_out: ptr to first unsent outbound message in queue
147 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
148 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
149 * @reasm_buf: head of partially reassembled inbound message fragments
150 * @bc_rcvr: marks that this is a broadcast receiver link
151 * @stats: collects statistics regarding link activity
152 */
153struct tipc_link {
154 u32 addr;
155 char name[TIPC_MAX_LINK_NAME];
156 struct tipc_media_addr *media_addr;
157 struct net *net;
158
159 /* Management and link supervision data */
160 u32 peer_session;
161 u32 peer_bearer_id;
162 u32 bearer_id;
163 u32 tolerance;
164 unsigned long keepalive_intv;
165 u32 abort_limit;
166 u32 state;
167 u16 peer_caps;
168 bool active;
169 u32 silent_intv_cnt;
170 struct {
171 unchar hdr[INT_H_SIZE];
172 unchar body[TIPC_MAX_IF_NAME];
173 } proto_msg;
174 struct tipc_msg *pmsg;
175 u32 priority;
176 char net_plane;
177
178 /* Failover/synch */
179 u16 drop_point;
180 struct sk_buff *failover_reasm_skb;
181
182 /* Max packet negotiation */
183 u16 mtu;
184 u16 advertised_mtu;
185
186 /* Sending */
187 struct sk_buff_head transmq;
188 struct sk_buff_head backlogq;
189 struct {
190 u16 len;
191 u16 limit;
192 } backlog[5];
193 u16 snd_nxt;
194 u16 last_retransm;
195 u16 window;
196 u32 stale_count;
197
198 /* Reception */
199 u16 rcv_nxt;
200 u32 rcv_unacked;
201 struct sk_buff_head deferdq;
202 struct sk_buff_head *inputq;
203 struct sk_buff_head *namedq;
204
205 /* Congestion handling */
206 struct sk_buff_head wakeupq;
207
208 /* Fragmentation/reassembly */
209 struct sk_buff *reasm_buf;
210
211 /* Broadcast */
212 u16 ackers;
213 u16 acked;
214 struct tipc_link *bc_rcvlink;
215 struct tipc_link *bc_sndlink;
216 int nack_state;
217 bool bc_peer_is_up;
218
219 /* Statistics */
220 struct tipc_stats stats;
221};
222
223bool tipc_link_create(struct net *net, char *if_name, int bearer_id, 74bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
224 int tolerance, char net_plane, u32 mtu, int priority, 75 int tolerance, char net_plane, u32 mtu, int priority,
225 int window, u32 session, u32 ownnode, u32 peer, 76 int window, u32 session, u32 ownnode, u32 peer,
@@ -235,11 +86,11 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
235 struct sk_buff_head *namedq, 86 struct sk_buff_head *namedq,
236 struct tipc_link *bc_sndlink, 87 struct tipc_link *bc_sndlink,
237 struct tipc_link **link); 88 struct tipc_link **link);
89void tipc_link_reinit(struct tipc_link *l, u32 addr);
238void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, 90void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
239 int mtyp, struct sk_buff_head *xmitq); 91 int mtyp, struct sk_buff_head *xmitq);
240void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq); 92void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
241int tipc_link_fsm_evt(struct tipc_link *l, int evt); 93int tipc_link_fsm_evt(struct tipc_link *l, int evt);
242void tipc_link_reset_fragments(struct tipc_link *l_ptr);
243bool tipc_link_is_up(struct tipc_link *l); 94bool tipc_link_is_up(struct tipc_link *l);
244bool tipc_link_peer_is_down(struct tipc_link *l); 95bool tipc_link_peer_is_down(struct tipc_link *l);
245bool tipc_link_is_reset(struct tipc_link *l); 96bool tipc_link_is_reset(struct tipc_link *l);
@@ -248,15 +99,25 @@ bool tipc_link_is_synching(struct tipc_link *l);
248bool tipc_link_is_failingover(struct tipc_link *l); 99bool tipc_link_is_failingover(struct tipc_link *l);
249bool tipc_link_is_blocked(struct tipc_link *l); 100bool tipc_link_is_blocked(struct tipc_link *l);
250void tipc_link_set_active(struct tipc_link *l, bool active); 101void tipc_link_set_active(struct tipc_link *l, bool active);
251void tipc_link_reset(struct tipc_link *l_ptr); 102void tipc_link_reset(struct tipc_link *l);
252int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list, 103void tipc_link_reset_stats(struct tipc_link *l);
104int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
253 struct sk_buff_head *xmitq); 105 struct sk_buff_head *xmitq);
106struct sk_buff_head *tipc_link_inputq(struct tipc_link *l);
107u16 tipc_link_rcv_nxt(struct tipc_link *l);
108u16 tipc_link_acked(struct tipc_link *l);
109u32 tipc_link_id(struct tipc_link *l);
110char *tipc_link_name(struct tipc_link *l);
111char tipc_link_plane(struct tipc_link *l);
112int tipc_link_prio(struct tipc_link *l);
113int tipc_link_window(struct tipc_link *l);
114unsigned long tipc_link_tolerance(struct tipc_link *l);
115void tipc_link_set_tolerance(struct tipc_link *l, u32 tol);
116void tipc_link_set_prio(struct tipc_link *l, u32 prio);
117void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
254void tipc_link_set_queue_limits(struct tipc_link *l, u32 window); 118void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
255 119int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
256int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb); 120 struct tipc_link *link, int nlflags);
257int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
258int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
259int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
260int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); 121int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
261int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq); 122int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
262int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, 123int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
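
Since struct tipc_link and struct tipc_stats are no longer visible outside link.c, any consumer of this header has to go through the accessors declared above. A brief hedged sketch of a read-only consumer:

	/* Sketch: inspecting a link through the accessor API only. */
	static void report_link(struct tipc_link *l)
	{
		pr_info("link %s: prio %d, window %d, next rx %u\n",
			tipc_link_name(l), tipc_link_prio(l),
			tipc_link_window(l), tipc_link_rcv_nxt(l));
	}
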
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c07612bab95c..ebe9d0ff6e9e 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -84,31 +84,6 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
84 return buf; 84 return buf;
85} 85}
86 86
87void named_cluster_distribute(struct net *net, struct sk_buff *skb)
88{
89 struct tipc_net *tn = net_generic(net, tipc_net_id);
90 struct sk_buff *oskb;
91 struct tipc_node *node;
92 u32 dnode;
93
94 rcu_read_lock();
95 list_for_each_entry_rcu(node, &tn->node_list, list) {
96 dnode = node->addr;
97 if (in_own_node(net, dnode))
98 continue;
99 if (!tipc_node_is_up(node))
100 continue;
101 oskb = pskb_copy(skb, GFP_ATOMIC);
102 if (!oskb)
103 break;
104 msg_set_destnode(buf_msg(oskb), dnode);
105 tipc_node_xmit_skb(net, oskb, dnode, 0);
106 }
107 rcu_read_unlock();
108
109 kfree_skb(skb);
110}
111
112/** 87/**
113 * tipc_named_publish - tell other nodes about a new publication by this node 88 * tipc_named_publish - tell other nodes about a new publication by this node
114 */ 89 */
@@ -226,42 +201,6 @@ void tipc_named_node_up(struct net *net, u32 dnode)
226 tipc_node_xmit(net, &head, dnode, 0); 201 tipc_node_xmit(net, &head, dnode, 0);
227} 202}
228 203
229static void tipc_publ_subscribe(struct net *net, struct publication *publ,
230 u32 addr)
231{
232 struct tipc_node *node;
233
234 if (in_own_node(net, addr))
235 return;
236
237 node = tipc_node_find(net, addr);
238 if (!node) {
239 pr_warn("Node subscription rejected, unknown node 0x%x\n",
240 addr);
241 return;
242 }
243
244 tipc_node_lock(node);
245 list_add_tail(&publ->nodesub_list, &node->publ_list);
246 tipc_node_unlock(node);
247 tipc_node_put(node);
248}
249
250static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
251 u32 addr)
252{
253 struct tipc_node *node;
254
255 node = tipc_node_find(net, addr);
256 if (!node)
257 return;
258
259 tipc_node_lock(node);
260 list_del_init(&publ->nodesub_list);
261 tipc_node_unlock(node);
262 tipc_node_put(node);
263}
264
265/** 204/**
266 * tipc_publ_purge - remove publication associated with a failed node 205 * tipc_publ_purge - remove publication associated with a failed node
267 * 206 *
@@ -277,7 +216,7 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
277 p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, 216 p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
278 publ->node, publ->ref, publ->key); 217 publ->node, publ->ref, publ->key);
279 if (p) 218 if (p)
280 tipc_publ_unsubscribe(net, p, addr); 219 tipc_node_unsubscribe(net, &p->nodesub_list, addr);
281 spin_unlock_bh(&tn->nametbl_lock); 220 spin_unlock_bh(&tn->nametbl_lock);
282 221
283 if (p != publ) { 222 if (p != publ) {
@@ -317,7 +256,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
317 TIPC_CLUSTER_SCOPE, node, 256 TIPC_CLUSTER_SCOPE, node,
318 ntohl(i->ref), ntohl(i->key)); 257 ntohl(i->ref), ntohl(i->key));
319 if (publ) { 258 if (publ) {
320 tipc_publ_subscribe(net, publ, node); 259 tipc_node_subscribe(net, &publ->nodesub_list, node);
321 return true; 260 return true;
322 } 261 }
323 } else if (dtype == WITHDRAWAL) { 262 } else if (dtype == WITHDRAWAL) {
@@ -326,7 +265,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
326 node, ntohl(i->ref), 265 node, ntohl(i->ref),
327 ntohl(i->key)); 266 ntohl(i->key));
328 if (publ) { 267 if (publ) {
329 tipc_publ_unsubscribe(net, publ, node); 268 tipc_node_unsubscribe(net, &publ->nodesub_list, node);
330 kfree_rcu(publ, rcu); 269 kfree_rcu(publ, rcu);
331 return true; 270 return true;
332 } 271 }
@@ -397,6 +336,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
397 336
398 spin_lock_bh(&tn->nametbl_lock); 337 spin_lock_bh(&tn->nametbl_lock);
399 for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { 338 for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
339 skb_linearize(skb);
400 msg = buf_msg(skb); 340 msg = buf_msg(skb);
401 mtype = msg_type(msg); 341 mtype = msg_type(msg);
402 item = (struct distr_item *)msg_data(msg); 342 item = (struct distr_item *)msg_data(msg);
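
The skb_linearize() added to tipc_named_rcv() (and to the ACTIVATE_MSG path in link.c above) matters because the loop walks msg_data() as a flat array of struct distr_item; a buffer that arrived in fragments, e.g. over a UDP bearer, must be made linear before that pointer arithmetic is valid. The idiom, sketched with an explicit error check that the patch itself omits:

	/* Sketch of the linearize-before-parse idiom. */
	if (skb_linearize(skb)) {	/* can fail under memory pressure */
		kfree_skb(skb);
		continue;		/* hypothetical error path */
	}
	msg = buf_msg(skb);		/* header pointer is safe only when linear */
	item = (struct distr_item *)msg_data(msg);
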
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index dd2d9fd80da2..1264ba0af937 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -69,7 +69,6 @@ struct distr_item {
69 69
70struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); 70struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
71struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); 71struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
72void named_cluster_distribute(struct net *net, struct sk_buff *buf);
73void tipc_named_node_up(struct net *net, u32 dnode); 72void tipc_named_node_up(struct net *net, u32 dnode);
74void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); 73void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
75void tipc_named_reinit(struct net *net); 74void tipc_named_reinit(struct net *net);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 0f47f08bf38f..91fce70291a8 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -42,6 +42,7 @@
42#include "subscr.h" 42#include "subscr.h"
43#include "bcast.h" 43#include "bcast.h"
44#include "addr.h" 44#include "addr.h"
45#include "node.h"
45#include <net/genetlink.h> 46#include <net/genetlink.h>
46 47
47#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ 48#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
@@ -677,7 +678,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
677 spin_unlock_bh(&tn->nametbl_lock); 678 spin_unlock_bh(&tn->nametbl_lock);
678 679
679 if (buf) 680 if (buf)
680 named_cluster_distribute(net, buf); 681 tipc_node_broadcast(net, buf);
681 return publ; 682 return publ;
682} 683}
683 684
@@ -709,7 +710,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
709 spin_unlock_bh(&tn->nametbl_lock); 710 spin_unlock_bh(&tn->nametbl_lock);
710 711
711 if (skb) { 712 if (skb) {
712 named_cluster_distribute(net, skb); 713 tipc_node_broadcast(net, skb);
713 return 1; 714 return 1;
714 } 715 }
715 return 0; 716 return 0;
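
named_cluster_distribute() has moved into node.c as tipc_node_broadcast(), so the publish and withdraw paths above simply hand the buffer over. Judging from the body removed from name_distr.c earlier in this diff, the replacement keeps the same shape: copy per reachable node, retarget, transmit, then drop the original. A hedged reconstruction (the body actually added in node.c may differ in detail):

	/* Sketch reconstructed from the removed named_cluster_distribute(). */
	void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
	{
		struct tipc_node *n;
		struct sk_buff *txskb;
		u32 dst;

		rcu_read_lock();
		list_for_each_entry_rcu(n, tipc_nodes(net), list) {
			dst = n->addr;
			if (in_own_node(net, dst) || !tipc_node_is_up(n))
				continue;
			txskb = pskb_copy(skb, GFP_ATOMIC);
			if (!txskb)
				break;
			msg_set_destnode(buf_msg(txskb), dst);
			tipc_node_xmit_skb(net, txskb, dst, 0);
		}
		rcu_read_unlock();
		kfree_skb(skb);
	}
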
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7f6475efc984..8975b0135b76 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -101,18 +101,18 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
101 }, 101 },
102 { 102 {
103 .cmd = TIPC_NL_LINK_GET, 103 .cmd = TIPC_NL_LINK_GET,
104 .doit = tipc_nl_link_get, 104 .doit = tipc_nl_node_get_link,
105 .dumpit = tipc_nl_link_dump, 105 .dumpit = tipc_nl_node_dump_link,
106 .policy = tipc_nl_policy, 106 .policy = tipc_nl_policy,
107 }, 107 },
108 { 108 {
109 .cmd = TIPC_NL_LINK_SET, 109 .cmd = TIPC_NL_LINK_SET,
110 .doit = tipc_nl_link_set, 110 .doit = tipc_nl_node_set_link,
111 .policy = tipc_nl_policy, 111 .policy = tipc_nl_policy,
112 }, 112 },
113 { 113 {
114 .cmd = TIPC_NL_LINK_RESET_STATS, 114 .cmd = TIPC_NL_LINK_RESET_STATS,
115 .doit = tipc_nl_link_reset_stats, 115 .doit = tipc_nl_node_reset_link_stats,
116 .policy = tipc_nl_policy, 116 .policy = tipc_nl_policy,
117 }, 117 },
118 { 118 {
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 1eadc95e1132..2c016fdefe97 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -1023,25 +1023,25 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
1023 msg->req_type = TIPC_TLV_LINK_NAME; 1023 msg->req_type = TIPC_TLV_LINK_NAME;
1024 msg->rep_size = ULTRA_STRING_MAX_LEN; 1024 msg->rep_size = ULTRA_STRING_MAX_LEN;
1025 msg->rep_type = TIPC_TLV_ULTRA_STRING; 1025 msg->rep_type = TIPC_TLV_ULTRA_STRING;
1026 dump.dumpit = tipc_nl_link_dump; 1026 dump.dumpit = tipc_nl_node_dump_link;
1027 dump.format = tipc_nl_compat_link_stat_dump; 1027 dump.format = tipc_nl_compat_link_stat_dump;
1028 return tipc_nl_compat_dumpit(&dump, msg); 1028 return tipc_nl_compat_dumpit(&dump, msg);
1029 case TIPC_CMD_GET_LINKS: 1029 case TIPC_CMD_GET_LINKS:
1030 msg->req_type = TIPC_TLV_NET_ADDR; 1030 msg->req_type = TIPC_TLV_NET_ADDR;
1031 msg->rep_size = ULTRA_STRING_MAX_LEN; 1031 msg->rep_size = ULTRA_STRING_MAX_LEN;
1032 dump.dumpit = tipc_nl_link_dump; 1032 dump.dumpit = tipc_nl_node_dump_link;
1033 dump.format = tipc_nl_compat_link_dump; 1033 dump.format = tipc_nl_compat_link_dump;
1034 return tipc_nl_compat_dumpit(&dump, msg); 1034 return tipc_nl_compat_dumpit(&dump, msg);
1035 case TIPC_CMD_SET_LINK_TOL: 1035 case TIPC_CMD_SET_LINK_TOL:
1036 case TIPC_CMD_SET_LINK_PRI: 1036 case TIPC_CMD_SET_LINK_PRI:
1037 case TIPC_CMD_SET_LINK_WINDOW: 1037 case TIPC_CMD_SET_LINK_WINDOW:
1038 msg->req_type = TIPC_TLV_LINK_CONFIG; 1038 msg->req_type = TIPC_TLV_LINK_CONFIG;
1039 doit.doit = tipc_nl_link_set; 1039 doit.doit = tipc_nl_node_set_link;
1040 doit.transcode = tipc_nl_compat_link_set; 1040 doit.transcode = tipc_nl_compat_link_set;
1041 return tipc_nl_compat_doit(&doit, msg); 1041 return tipc_nl_compat_doit(&doit, msg);
1042 case TIPC_CMD_RESET_LINK_STATS: 1042 case TIPC_CMD_RESET_LINK_STATS:
1043 msg->req_type = TIPC_TLV_LINK_NAME; 1043 msg->req_type = TIPC_TLV_LINK_NAME;
1044 doit.doit = tipc_nl_link_reset_stats; 1044 doit.doit = tipc_nl_node_reset_link_stats;
1045 doit.transcode = tipc_nl_compat_link_reset_stats; 1045 doit.transcode = tipc_nl_compat_link_reset_stats;
1046 return tipc_nl_compat_doit(&doit, msg); 1046 return tipc_nl_compat_doit(&doit, msg);
1047 case TIPC_CMD_SHOW_NAME_TABLE: 1047 case TIPC_CMD_SHOW_NAME_TABLE:
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec0a43c..3f7a4ed71990 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -42,6 +42,84 @@
42#include "bcast.h" 42#include "bcast.h"
43#include "discover.h" 43#include "discover.h"
44 44
45#define INVALID_NODE_SIG 0x10000
46
47/* Flags used to take different actions according to flag type
48 * TIPC_NOTIFY_NODE_DOWN: notify node is down
49 * TIPC_NOTIFY_NODE_UP: notify node is up
 50 * TIPC_NOTIFY_LINK_UP/TIPC_NOTIFY_LINK_DOWN: notify link went up/down
51 */
52enum {
53 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
54 TIPC_NOTIFY_NODE_UP = (1 << 4),
55 TIPC_NOTIFY_LINK_UP = (1 << 6),
56 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
57};
58
59struct tipc_link_entry {
60 struct tipc_link *link;
61 spinlock_t lock; /* per link */
62 u32 mtu;
63 struct sk_buff_head inputq;
64 struct tipc_media_addr maddr;
65};
66
67struct tipc_bclink_entry {
68 struct tipc_link *link;
69 struct sk_buff_head inputq1;
70 struct sk_buff_head arrvq;
71 struct sk_buff_head inputq2;
72 struct sk_buff_head namedq;
73};
74
75/**
76 * struct tipc_node - TIPC node structure
77 * @addr: network address of node
 78 * @kref: reference counter to node object
79 * @lock: rwlock governing access to structure
80 * @net: the applicable net namespace
81 * @hash: links to adjacent nodes in unsorted hash chain
 82 * @bc_entry: broadcast link entry
 83 * @conn_sks: list of connections with this node as peer
84 * @active_links: bearer ids of active links, used as index into links[] array
85 * @links: array containing references to all links to node
86 * @action_flags: bit mask of different types of node actions
87 * @state: connectivity state vs peer node
88 * @sync_point: sequence number where synch/failover is finished
89 * @list: links to adjacent nodes in sorted list of cluster's nodes
90 * @working_links: number of working links to node (both active and standby)
91 * @link_cnt: number of links to node
92 * @capabilities: bitmap, indicating peer node's functional capabilities
93 * @signature: node instance identifier
94 * @link_id: local and remote bearer ids of changing link, if any
95 * @publ_list: list of publications
96 * @rcu: rcu struct for tipc_node
97 */
98struct tipc_node {
99 u32 addr;
100 struct kref kref;
101 rwlock_t lock;
102 struct net *net;
103 struct hlist_node hash;
104 int active_links[2];
105 struct tipc_link_entry links[MAX_BEARERS];
106 struct tipc_bclink_entry bc_entry;
107 int action_flags;
108 struct list_head list;
109 int state;
110 u16 sync_point;
111 int link_cnt;
112 u16 working_links;
113 u16 capabilities;
114 u32 signature;
115 u32 link_id;
116 struct list_head publ_list;
117 struct list_head conn_sks;
118 unsigned long keepalive_intv;
119 struct timer_list timer;
120 struct rcu_head rcu;
121};
122
45/* Node FSM states and events: 123/* Node FSM states and events:
46 */ 124 */
47enum { 125enum {
@@ -75,6 +153,9 @@ static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
75static void tipc_node_delete(struct tipc_node *node); 153static void tipc_node_delete(struct tipc_node *node);
76static void tipc_node_timeout(unsigned long data); 154static void tipc_node_timeout(unsigned long data);
77static void tipc_node_fsm_evt(struct tipc_node *n, int evt); 155static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
156static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
157static void tipc_node_put(struct tipc_node *node);
158static bool tipc_node_is_up(struct tipc_node *n);
78 159
79struct tipc_sock_conn { 160struct tipc_sock_conn {
80 u32 port; 161 u32 port;
@@ -83,12 +164,54 @@ struct tipc_sock_conn {
83 struct list_head list; 164 struct list_head list;
84}; 165};
85 166
167static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
168 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
169 [TIPC_NLA_LINK_NAME] = {
170 .type = NLA_STRING,
171 .len = TIPC_MAX_LINK_NAME
172 },
173 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
174 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
175 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
176 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
177 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
178 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
179 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
180 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
181};
182
86static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = { 183static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
87 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC }, 184 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
88 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 }, 185 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
89 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG } 186 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
90}; 187};
91 188
189static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
190{
191 int bearer_id = n->active_links[sel & 1];
192
193 if (unlikely(bearer_id == INVALID_BEARER_ID))
194 return NULL;
195
196 return n->links[bearer_id].link;
197}
198
199int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
200{
201 struct tipc_node *n;
202 int bearer_id;
203 unsigned int mtu = MAX_MSG_SIZE;
204
205 n = tipc_node_find(net, addr);
206 if (unlikely(!n))
207 return mtu;
208
209 bearer_id = n->active_links[sel & 1];
210 if (likely(bearer_id != INVALID_BEARER_ID))
211 mtu = n->links[bearer_id].mtu;
212 tipc_node_put(n);
213 return mtu;
214}
92/* 215/*
93 * A trivial power-of-two bitmask technique is used for speed, since this 216 * A trivial power-of-two bitmask technique is used for speed, since this
94 * operation is done for every incoming TIPC packet. The number of hash table 217 * operation is done for every incoming TIPC packet. The number of hash table
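For reference, the power-of-two bitmask technique described in the comment above reduces the hash to a single AND over the low-order address bits; a minimal sketch, assuming NODE_HTABLE_SIZE (defined in node.h) is a power of two:

static unsigned int tipc_hashfn(u32 addr)
{
	/* table size is a power of two, so modulo reduces to a mask */
	return addr & (NODE_HTABLE_SIZE - 1);
}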
@@ -107,7 +230,7 @@ static void tipc_node_kref_release(struct kref *kref)
107 tipc_node_delete(node); 230 tipc_node_delete(node);
108} 231}
109 232
110void tipc_node_put(struct tipc_node *node) 233static void tipc_node_put(struct tipc_node *node)
111{ 234{
112 kref_put(&node->kref, tipc_node_kref_release); 235 kref_put(&node->kref, tipc_node_kref_release);
113} 236}
@@ -120,7 +243,7 @@ static void tipc_node_get(struct tipc_node *node)
120/* 243/*
121 * tipc_node_find - locate specified node object, if it exists 244 * tipc_node_find - locate specified node object, if it exists
122 */ 245 */
123struct tipc_node *tipc_node_find(struct net *net, u32 addr) 246static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
124{ 247{
125 struct tipc_net *tn = net_generic(net, tipc_net_id); 248 struct tipc_net *tn = net_generic(net, tipc_net_id);
126 struct tipc_node *node; 249 struct tipc_node *node;
@@ -141,66 +264,122 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
141 return NULL; 264 return NULL;
142} 265}
143 266
267static void tipc_node_read_lock(struct tipc_node *n)
268{
269 read_lock_bh(&n->lock);
270}
271
272static void tipc_node_read_unlock(struct tipc_node *n)
273{
274 read_unlock_bh(&n->lock);
275}
276
277static void tipc_node_write_lock(struct tipc_node *n)
278{
279 write_lock_bh(&n->lock);
280}
281
282static void tipc_node_write_unlock(struct tipc_node *n)
283{
284 struct net *net = n->net;
285 u32 addr = 0;
286 u32 flags = n->action_flags;
287 u32 link_id = 0;
288 struct list_head *publ_list;
289
290 if (likely(!flags)) {
291 write_unlock_bh(&n->lock);
292 return;
293 }
294
295 addr = n->addr;
296 link_id = n->link_id;
297 publ_list = &n->publ_list;
298
299 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
300 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
301
302 write_unlock_bh(&n->lock);
303
304 if (flags & TIPC_NOTIFY_NODE_DOWN)
305 tipc_publ_notify(net, publ_list, addr);
306
307 if (flags & TIPC_NOTIFY_NODE_UP)
308 tipc_named_node_up(net, addr);
309
310 if (flags & TIPC_NOTIFY_LINK_UP)
311 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
312 TIPC_NODE_SCOPE, link_id, addr);
313
314 if (flags & TIPC_NOTIFY_LINK_DOWN)
315 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
316 link_id, addr);
317}
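Note the pattern in tipc_node_write_unlock() above: the action flags are latched and cleared while the write lock is still held, and the name-table notifications are issued only after the lock is dropped, so those calls can safely take other locks. A minimal sketch of the intended caller pattern, using the helpers introduced in this patch:

	tipc_node_write_lock(n);
	n->action_flags |= TIPC_NOTIFY_LINK_UP;	/* latch the event */
	n->link_id = tipc_link_id(nl);
	tipc_node_write_unlock(n);	/* deferred notification fires here */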
318
144struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) 319struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
145{ 320{
146 struct tipc_net *tn = net_generic(net, tipc_net_id); 321 struct tipc_net *tn = net_generic(net, tipc_net_id);
147 struct tipc_node *n_ptr, *temp_node; 322 struct tipc_node *n, *temp_node;
323 int i;
148 324
149 spin_lock_bh(&tn->node_list_lock); 325 spin_lock_bh(&tn->node_list_lock);
150 n_ptr = tipc_node_find(net, addr); 326 n = tipc_node_find(net, addr);
151 if (n_ptr) 327 if (n)
152 goto exit; 328 goto exit;
153 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 329 n = kzalloc(sizeof(*n), GFP_ATOMIC);
154 if (!n_ptr) { 330 if (!n) {
155 pr_warn("Node creation failed, no memory\n"); 331 pr_warn("Node creation failed, no memory\n");
156 goto exit; 332 goto exit;
157 } 333 }
158 n_ptr->addr = addr; 334 n->addr = addr;
159 n_ptr->net = net; 335 n->net = net;
160 n_ptr->capabilities = capabilities; 336 n->capabilities = capabilities;
161 kref_init(&n_ptr->kref); 337 kref_init(&n->kref);
162 spin_lock_init(&n_ptr->lock); 338 rwlock_init(&n->lock);
163 INIT_HLIST_NODE(&n_ptr->hash); 339 INIT_HLIST_NODE(&n->hash);
164 INIT_LIST_HEAD(&n_ptr->list); 340 INIT_LIST_HEAD(&n->list);
165 INIT_LIST_HEAD(&n_ptr->publ_list); 341 INIT_LIST_HEAD(&n->publ_list);
166 INIT_LIST_HEAD(&n_ptr->conn_sks); 342 INIT_LIST_HEAD(&n->conn_sks);
167 skb_queue_head_init(&n_ptr->bc_entry.namedq); 343 skb_queue_head_init(&n->bc_entry.namedq);
168 skb_queue_head_init(&n_ptr->bc_entry.inputq1); 344 skb_queue_head_init(&n->bc_entry.inputq1);
169 __skb_queue_head_init(&n_ptr->bc_entry.arrvq); 345 __skb_queue_head_init(&n->bc_entry.arrvq);
170 skb_queue_head_init(&n_ptr->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
171 hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
172 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
173 if (n_ptr->addr < temp_node->addr) 351 if (n->addr < temp_node->addr)
174 break; 352 break;
175 } 353 }
176 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 354 list_add_tail_rcu(&n->list, &temp_node->list);
177 n_ptr->state = SELF_DOWN_PEER_LEAVING; 355 n->state = SELF_DOWN_PEER_LEAVING;
178 n_ptr->signature = INVALID_NODE_SIG; 356 n->signature = INVALID_NODE_SIG;
179 n_ptr->active_links[0] = INVALID_BEARER_ID; 357 n->active_links[0] = INVALID_BEARER_ID;
180 n_ptr->active_links[1] = INVALID_BEARER_ID; 358 n->active_links[1] = INVALID_BEARER_ID;
181 if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr, 359 if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
182 U16_MAX, tipc_bc_sndlink(net)->window, 360 U16_MAX,
183 n_ptr->capabilities, 361 tipc_link_window(tipc_bc_sndlink(net)),
184 &n_ptr->bc_entry.inputq1, 362 n->capabilities,
185 &n_ptr->bc_entry.namedq, 363 &n->bc_entry.inputq1,
364 &n->bc_entry.namedq,
186 tipc_bc_sndlink(net), 365 tipc_bc_sndlink(net),
187 &n_ptr->bc_entry.link)) { 366 &n->bc_entry.link)) {
188 pr_warn("Broadcast rcv link creation failed, no memory\n"); 367 pr_warn("Broadcast rcv link creation failed, no memory\n");
189 kfree(n_ptr); 368 kfree(n);
190 n_ptr = NULL; 369 n = NULL;
191 goto exit; 370 goto exit;
192 } 371 }
193 tipc_node_get(n_ptr); 372 tipc_node_get(n);
194 setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr); 373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
195 n_ptr->keepalive_intv = U32_MAX; 374 n->keepalive_intv = U32_MAX;
196exit: 375exit:
197 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
198 return n_ptr; 377 return n;
199} 378}
200 379
201static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) 380static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
202{ 381{
203 unsigned long tol = l->tolerance; 382 unsigned long tol = tipc_link_tolerance(l);
204 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; 383 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
205 unsigned long keepalive_intv = msecs_to_jiffies(intv); 384 unsigned long keepalive_intv = msecs_to_jiffies(intv);
206 385
@@ -209,7 +388,7 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
209 n->keepalive_intv = keepalive_intv; 388 n->keepalive_intv = keepalive_intv;
210 389
211 /* Ensure link's abort limit corresponds to current interval */ 390 /* Ensure link's abort limit corresponds to current interval */
212 l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv); 391 tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
213} 392}
214 393
215static void tipc_node_delete(struct tipc_node *node) 394static void tipc_node_delete(struct tipc_node *node)
@@ -234,6 +413,42 @@ void tipc_node_stop(struct net *net)
234 spin_unlock_bh(&tn->node_list_lock); 413 spin_unlock_bh(&tn->node_list_lock);
235} 414}
236 415
416void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
417{
418 struct tipc_node *n;
419
420 if (in_own_node(net, addr))
421 return;
422
423 n = tipc_node_find(net, addr);
424 if (!n) {
425 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
426 return;
427 }
428 tipc_node_write_lock(n);
429 list_add_tail(subscr, &n->publ_list);
430 tipc_node_write_unlock(n);
431 tipc_node_put(n);
432}
433
434void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
435{
436 struct tipc_node *n;
437
438 if (in_own_node(net, addr))
439 return;
440
441 n = tipc_node_find(net, addr);
442 if (!n) {
443 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
444 return;
445 }
446 tipc_node_write_lock(n);
447 list_del_init(subscr);
448 tipc_node_write_unlock(n);
449 tipc_node_put(n);
450}
451
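The subscribe/unsubscribe pair above lets a publication track a peer node's liveness: the caller links a list_head onto the node's publ_list under the write lock, and every entry on that list is handed to tipc_publ_notify() when TIPC_NOTIFY_NODE_DOWN fires. A sketch of the expected usage, where publ->nodesub_list is assumed to be a list_head embedded in the caller's publication object:

	/* publication originating at remote node 'addr' appears */
	tipc_node_subscribe(net, &publ->nodesub_list, addr);

	/* publication is withdrawn or the subscriber goes away */
	tipc_node_unsubscribe(net, &publ->nodesub_list, addr);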
237int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 452int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
238{ 453{
239 struct tipc_node *node; 454 struct tipc_node *node;
@@ -257,9 +472,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
257 conn->port = port; 472 conn->port = port;
258 conn->peer_port = peer_port; 473 conn->peer_port = peer_port;
259 474
260 tipc_node_lock(node); 475 tipc_node_write_lock(node);
261 list_add_tail(&conn->list, &node->conn_sks); 476 list_add_tail(&conn->list, &node->conn_sks);
262 tipc_node_unlock(node); 477 tipc_node_write_unlock(node);
263exit: 478exit:
264 tipc_node_put(node); 479 tipc_node_put(node);
265 return err; 480 return err;
@@ -277,14 +492,14 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
277 if (!node) 492 if (!node)
278 return; 493 return;
279 494
280 tipc_node_lock(node); 495 tipc_node_write_lock(node);
281 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { 496 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
282 if (port != conn->port) 497 if (port != conn->port)
283 continue; 498 continue;
284 list_del(&conn->list); 499 list_del(&conn->list);
285 kfree(conn); 500 kfree(conn);
286 } 501 }
287 tipc_node_unlock(node); 502 tipc_node_write_unlock(node);
288 tipc_node_put(node); 503 tipc_node_put(node);
289} 504}
290 505
@@ -301,14 +516,16 @@ static void tipc_node_timeout(unsigned long data)
301 __skb_queue_head_init(&xmitq); 516 __skb_queue_head_init(&xmitq);
302 517
303 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { 518 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
304 tipc_node_lock(n); 519 tipc_node_read_lock(n);
305 le = &n->links[bearer_id]; 520 le = &n->links[bearer_id];
521 spin_lock_bh(&le->lock);
306 if (le->link) { 522 if (le->link) {
307 /* Link tolerance may change asynchronously: */ 523 /* Link tolerance may change asynchronously: */
308 tipc_node_calculate_timer(n, le->link); 524 tipc_node_calculate_timer(n, le->link);
309 rc = tipc_link_timeout(le->link, &xmitq); 525 rc = tipc_link_timeout(le->link, &xmitq);
310 } 526 }
311 tipc_node_unlock(n); 527 spin_unlock_bh(&le->lock);
528 tipc_node_read_unlock(n);
312 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); 529 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
313 if (rc & TIPC_LINK_DOWN_EVT) 530 if (rc & TIPC_LINK_DOWN_EVT)
314 tipc_node_link_down(n, bearer_id, false); 531 tipc_node_link_down(n, bearer_id, false);
@@ -340,16 +557,16 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
340 557
341 n->working_links++; 558 n->working_links++;
342 n->action_flags |= TIPC_NOTIFY_LINK_UP; 559 n->action_flags |= TIPC_NOTIFY_LINK_UP;
343 n->link_id = nl->peer_bearer_id << 16 | bearer_id; 560 n->link_id = tipc_link_id(nl);
344 561
345 /* Leave room for tunnel header when returning 'mtu' to users: */ 562 /* Leave room for tunnel header when returning 'mtu' to users: */
346 n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE; 563 n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
347 564
348 tipc_bearer_add_dest(n->net, bearer_id, n->addr); 565 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
349 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); 566 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
350 567
351 pr_debug("Established link <%s> on network plane %c\n", 568 pr_debug("Established link <%s> on network plane %c\n",
352 nl->name, nl->net_plane); 569 tipc_link_name(nl), tipc_link_plane(nl));
353 570
354 /* First link? => give it both slots */ 571 /* First link? => give it both slots */
355 if (!ol) { 572 if (!ol) {
@@ -362,17 +579,17 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
362 } 579 }
363 580
364 /* Second link => redistribute slots */ 581 /* Second link => redistribute slots */
365 if (nl->priority > ol->priority) { 582 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
366 pr_debug("Old link <%s> becomes standby\n", ol->name); 583 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
367 *slot0 = bearer_id; 584 *slot0 = bearer_id;
368 *slot1 = bearer_id; 585 *slot1 = bearer_id;
369 tipc_link_set_active(nl, true); 586 tipc_link_set_active(nl, true);
370 tipc_link_set_active(ol, false); 587 tipc_link_set_active(ol, false);
371 } else if (nl->priority == ol->priority) { 588 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
372 tipc_link_set_active(nl, true); 589 tipc_link_set_active(nl, true);
373 *slot1 = bearer_id; 590 *slot1 = bearer_id;
374 } else { 591 } else {
375 pr_debug("New link <%s> is standby\n", nl->name); 592 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
376 } 593 }
377 594
378 /* Prepare synchronization with first link */ 595 /* Prepare synchronization with first link */
@@ -387,9 +604,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
387static void tipc_node_link_up(struct tipc_node *n, int bearer_id, 604static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
388 struct sk_buff_head *xmitq) 605 struct sk_buff_head *xmitq)
389{ 606{
390 tipc_node_lock(n); 607 tipc_node_write_lock(n);
391 __tipc_node_link_up(n, bearer_id, xmitq); 608 __tipc_node_link_up(n, bearer_id, xmitq);
392 tipc_node_unlock(n); 609 tipc_node_write_unlock(n);
393} 610}
394 611
395/** 612/**
@@ -402,7 +619,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
402 struct tipc_link_entry *le = &n->links[*bearer_id]; 619 struct tipc_link_entry *le = &n->links[*bearer_id];
403 int *slot0 = &n->active_links[0]; 620 int *slot0 = &n->active_links[0];
404 int *slot1 = &n->active_links[1]; 621 int *slot1 = &n->active_links[1];
405 int i, highest = 0; 622 int i, highest = 0, prio;
406 struct tipc_link *l, *_l, *tnl; 623 struct tipc_link *l, *_l, *tnl;
407 624
408 l = n->links[*bearer_id].link; 625 l = n->links[*bearer_id].link;
@@ -411,12 +628,12 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
411 628
412 n->working_links--; 629 n->working_links--;
413 n->action_flags |= TIPC_NOTIFY_LINK_DOWN; 630 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
414 n->link_id = l->peer_bearer_id << 16 | *bearer_id; 631 n->link_id = tipc_link_id(l);
415 632
416 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); 633 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
417 634
418 pr_debug("Lost link <%s> on network plane %c\n", 635 pr_debug("Lost link <%s> on network plane %c\n",
419 l->name, l->net_plane); 636 tipc_link_name(l), tipc_link_plane(l));
420 637
421 /* Select new active link if any available */ 638 /* Select new active link if any available */
422 *slot0 = INVALID_BEARER_ID; 639 *slot0 = INVALID_BEARER_ID;
@@ -427,10 +644,11 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
427 continue; 644 continue;
428 if (_l == l) 645 if (_l == l)
429 continue; 646 continue;
430 if (_l->priority < highest) 647 prio = tipc_link_prio(_l);
648 if (prio < highest)
431 continue; 649 continue;
432 if (_l->priority > highest) { 650 if (prio > highest) {
433 highest = _l->priority; 651 highest = prio;
434 *slot0 = i; 652 *slot0 = i;
435 *slot1 = i; 653 *slot1 = i;
436 continue; 654 continue;
@@ -453,17 +671,17 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
453 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 671 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
454 672
455 /* There is still a working link => initiate failover */ 673 /* There is still a working link => initiate failover */
456 tnl = node_active_link(n, 0); 674 *bearer_id = n->active_links[0];
675 tnl = n->links[*bearer_id].link;
457 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 676 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
458 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 677 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
459 n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1); 678 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
460 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 679 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
461 tipc_link_reset(l); 680 tipc_link_reset(l);
462 tipc_link_fsm_evt(l, LINK_RESET_EVT); 681 tipc_link_fsm_evt(l, LINK_RESET_EVT);
463 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 682 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
464 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 683 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
465 *maddr = &n->links[tnl->bearer_id].maddr; 684 *maddr = &n->links[*bearer_id].maddr;
466 *bearer_id = tnl->bearer_id;
467} 685}
468 686
469static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 687static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
@@ -478,7 +696,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
478 696
479 __skb_queue_head_init(&xmitq); 697 __skb_queue_head_init(&xmitq);
480 698
481 tipc_node_lock(n); 699 tipc_node_write_lock(n);
482 if (!tipc_link_is_establishing(l)) { 700 if (!tipc_link_is_establishing(l)) {
483 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 701 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
484 if (delete) { 702 if (delete) {
@@ -490,12 +708,12 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
490 /* Defuse pending tipc_node_link_up() */ 708 /* Defuse pending tipc_node_link_up() */
491 tipc_link_fsm_evt(l, LINK_RESET_EVT); 709 tipc_link_fsm_evt(l, LINK_RESET_EVT);
492 } 710 }
493 tipc_node_unlock(n); 711 tipc_node_write_unlock(n);
494 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 712 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
495 tipc_sk_rcv(n->net, &le->inputq); 713 tipc_sk_rcv(n->net, &le->inputq);
496} 714}
497 715
498bool tipc_node_is_up(struct tipc_node *n) 716static bool tipc_node_is_up(struct tipc_node *n)
499{ 717{
500 return n->active_links[0] != INVALID_BEARER_ID; 718 return n->active_links[0] != INVALID_BEARER_ID;
501} 719}
@@ -523,7 +741,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
523 if (!n) 741 if (!n)
524 return; 742 return;
525 743
526 tipc_node_lock(n); 744 tipc_node_write_lock(n);
527 745
528 le = &n->links[b->identity]; 746 le = &n->links[b->identity];
529 747
@@ -626,7 +844,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
626 } 844 }
627 memcpy(&le->maddr, maddr, sizeof(*maddr)); 845 memcpy(&le->maddr, maddr, sizeof(*maddr));
628exit: 846exit:
629 tipc_node_unlock(n); 847 tipc_node_write_unlock(n);
630 if (reset && !tipc_link_is_reset(l)) 848 if (reset && !tipc_link_is_reset(l))
631 tipc_node_link_down(n, b->identity, false); 849 tipc_node_link_down(n, b->identity, false);
632 tipc_node_put(n); 850 tipc_node_put(n);
@@ -834,24 +1052,6 @@ illegal_evt:
834 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); 1052 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
835} 1053}
836 1054
837bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
838{
839 int state = n->state;
840
841 if (likely(state == SELF_UP_PEER_UP))
842 return true;
843
844 if (state == SELF_LEAVING_PEER_DOWN)
845 return false;
846
847 if (state == SELF_DOWN_PEER_LEAVING) {
848 if (msg_peer_node_is_up(hdr))
849 return false;
850 }
851
852 return true;
853}
854
855static void node_lost_contact(struct tipc_node *n, 1055static void node_lost_contact(struct tipc_node *n,
856 struct sk_buff_head *inputq) 1056 struct sk_buff_head *inputq)
857{ 1057{
@@ -913,56 +1113,18 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
913 if (bearer_id >= MAX_BEARERS) 1113 if (bearer_id >= MAX_BEARERS)
914 goto exit; 1114 goto exit;
915 1115
916 tipc_node_lock(node); 1116 tipc_node_read_lock(node);
917 link = node->links[bearer_id].link; 1117 link = node->links[bearer_id].link;
918 if (link) { 1118 if (link) {
919 strncpy(linkname, link->name, len); 1119 strncpy(linkname, tipc_link_name(link), len);
920 err = 0; 1120 err = 0;
921 } 1121 }
922exit: 1122exit:
923 tipc_node_unlock(node); 1123 tipc_node_read_unlock(node);
924 tipc_node_put(node); 1124 tipc_node_put(node);
925 return err; 1125 return err;
926} 1126}
927 1127
928void tipc_node_unlock(struct tipc_node *node)
929{
930 struct net *net = node->net;
931 u32 addr = 0;
932 u32 flags = node->action_flags;
933 u32 link_id = 0;
934 struct list_head *publ_list;
935
936 if (likely(!flags)) {
937 spin_unlock_bh(&node->lock);
938 return;
939 }
940
941 addr = node->addr;
942 link_id = node->link_id;
943 publ_list = &node->publ_list;
944
945 node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
946 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
947
948 spin_unlock_bh(&node->lock);
949
950 if (flags & TIPC_NOTIFY_NODE_DOWN)
951 tipc_publ_notify(net, publ_list, addr);
952
953 if (flags & TIPC_NOTIFY_NODE_UP)
954 tipc_named_node_up(net, addr);
955
956 if (flags & TIPC_NOTIFY_LINK_UP)
957 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
958 TIPC_NODE_SCOPE, link_id, addr);
959
960 if (flags & TIPC_NOTIFY_LINK_DOWN)
961 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
962 link_id, addr);
963
964}
965
966/* Caller should hold node lock for the passed node */ 1128/* Caller should hold node lock for the passed node */
967static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1129static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
968{ 1130{
@@ -997,20 +1159,6 @@ msg_full:
997 return -EMSGSIZE; 1159 return -EMSGSIZE;
998} 1160}
999 1161
1000static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
1001 int *bearer_id,
1002 struct tipc_media_addr **maddr)
1003{
1004 int id = n->active_links[sel & 1];
1005
1006 if (unlikely(id < 0))
1007 return NULL;
1008
1009 *bearer_id = id;
1010 *maddr = &n->links[id].maddr;
1011 return n->links[id].link;
1012}
1013
1014/** 1162/**
1015 * tipc_node_xmit() is the general link level function for message sending 1163 * tipc_node_xmit() is the general link level function for message sending
1016 * @net: the applicable net namespace 1164 * @net: the applicable net namespace
@@ -1023,34 +1171,38 @@ static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
1023int tipc_node_xmit(struct net *net, struct sk_buff_head *list, 1171int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1024 u32 dnode, int selector) 1172 u32 dnode, int selector)
1025{ 1173{
1026 struct tipc_link *l = NULL; 1174 struct tipc_link_entry *le = NULL;
1027 struct tipc_node *n; 1175 struct tipc_node *n;
1028 struct sk_buff_head xmitq; 1176 struct sk_buff_head xmitq;
1029 struct tipc_media_addr *maddr; 1177 int bearer_id = -1;
1030 int bearer_id;
1031 int rc = -EHOSTUNREACH; 1178 int rc = -EHOSTUNREACH;
1032 1179
1033 __skb_queue_head_init(&xmitq); 1180 __skb_queue_head_init(&xmitq);
1034 n = tipc_node_find(net, dnode); 1181 n = tipc_node_find(net, dnode);
1035 if (likely(n)) { 1182 if (likely(n)) {
1036 tipc_node_lock(n); 1183 tipc_node_read_lock(n);
1037 l = tipc_node_select_link(n, selector, &bearer_id, &maddr); 1184 bearer_id = n->active_links[selector & 1];
1038 if (likely(l)) 1185 if (bearer_id >= 0) {
1039 rc = tipc_link_xmit(l, list, &xmitq); 1186 le = &n->links[bearer_id];
1040 tipc_node_unlock(n); 1187 spin_lock_bh(&le->lock);
1188 rc = tipc_link_xmit(le->link, list, &xmitq);
1189 spin_unlock_bh(&le->lock);
1190 }
1191 tipc_node_read_unlock(n);
1192 if (likely(!skb_queue_empty(&xmitq))) {
1193 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1194 return 0;
1195 }
1041 if (unlikely(rc == -ENOBUFS)) 1196 if (unlikely(rc == -ENOBUFS))
1042 tipc_node_link_down(n, bearer_id, false); 1197 tipc_node_link_down(n, bearer_id, false);
1043 tipc_node_put(n); 1198 tipc_node_put(n);
1199 return rc;
1044 } 1200 }
1045 if (likely(!rc)) { 1201
1046 tipc_bearer_xmit(net, bearer_id, &xmitq, maddr); 1202 if (unlikely(!in_own_node(net, dnode)))
1047 return 0; 1203 return rc;
1048 } 1204 tipc_sk_rcv(net, list);
1049 if (likely(in_own_node(net, dnode))) { 1205 return 0;
1050 tipc_sk_rcv(net, list);
1051 return 0;
1052 }
1053 return rc;
1054} 1206}
1055 1207
1056/* tipc_node_xmit_skb(): send single buffer to destination 1208/* tipc_node_xmit_skb(): send single buffer to destination
@@ -1075,6 +1227,30 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1075 return 0; 1227 return 0;
1076} 1228}
1077 1229
1230void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
1231{
1232 struct sk_buff *txskb;
1233 struct tipc_node *n;
1234 u32 dst;
1235
1236 rcu_read_lock();
1237 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1238 dst = n->addr;
1239 if (in_own_node(net, dst))
1240 continue;
1241 if (!tipc_node_is_up(n))
1242 continue;
1243 txskb = pskb_copy(skb, GFP_ATOMIC);
1244 if (!txskb)
1245 break;
1246 msg_set_destnode(buf_msg(txskb), dst);
1247 tipc_node_xmit_skb(net, txskb, dst, 0);
1248 }
1249 rcu_read_unlock();
1250
1251 kfree_skb(skb);
1252}
1253
1078/** 1254/**
1079 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node 1255 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1080 * @net: the applicable net namespace 1256 * @net: the applicable net namespace
@@ -1116,9 +1292,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
1116 1292
1117 /* Broadcast ACKs are sent on a unicast link */ 1293 /* Broadcast ACKs are sent on a unicast link */
1118 if (rc & TIPC_LINK_SND_BC_ACK) { 1294 if (rc & TIPC_LINK_SND_BC_ACK) {
1119 tipc_node_lock(n); 1295 tipc_node_read_lock(n);
1120 tipc_link_build_ack_msg(le->link, &xmitq); 1296 tipc_link_build_ack_msg(le->link, &xmitq);
1121 tipc_node_unlock(n); 1297 tipc_node_read_unlock(n);
1122 } 1298 }
1123 1299
1124 if (!skb_queue_empty(&xmitq)) 1300 if (!skb_queue_empty(&xmitq))
@@ -1151,30 +1327,30 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1151 u16 oseqno = msg_seqno(hdr); 1327 u16 oseqno = msg_seqno(hdr);
1152 u16 iseqno = msg_seqno(msg_get_wrapped(hdr)); 1328 u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
1153 u16 exp_pkts = msg_msgcnt(hdr); 1329 u16 exp_pkts = msg_msgcnt(hdr);
1154 u16 rcv_nxt, syncpt, dlv_nxt; 1330 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1155 int state = n->state; 1331 int state = n->state;
1156 struct tipc_link *l, *tnl, *pl = NULL; 1332 struct tipc_link *l, *tnl, *pl = NULL;
1157 struct tipc_media_addr *maddr; 1333 struct tipc_media_addr *maddr;
1158 int i, pb_id; 1334 int pb_id;
1159 1335
1160 l = n->links[bearer_id].link; 1336 l = n->links[bearer_id].link;
1161 if (!l) 1337 if (!l)
1162 return false; 1338 return false;
1163 rcv_nxt = l->rcv_nxt; 1339 rcv_nxt = tipc_link_rcv_nxt(l);
1164 1340
1165 1341
1166 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) 1342 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1167 return true; 1343 return true;
1168 1344
1169 /* Find parallel link, if any */ 1345 /* Find parallel link, if any */
1170 for (i = 0; i < MAX_BEARERS; i++) { 1346 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1171 if ((i != bearer_id) && n->links[i].link) { 1347 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1172 pl = n->links[i].link; 1348 pl = n->links[pb_id].link;
1173 break; 1349 break;
1174 } 1350 }
1175 } 1351 }
1176 1352
 1177 /* Update node accessibility if applicable */ 1353 /* Check and update node accessibility if applicable */
1178 if (state == SELF_UP_PEER_COMING) { 1354 if (state == SELF_UP_PEER_COMING) {
1179 if (!tipc_link_is_up(l)) 1355 if (!tipc_link_is_up(l))
1180 return true; 1356 return true;
@@ -1187,8 +1363,12 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1187 if (msg_peer_node_is_up(hdr)) 1363 if (msg_peer_node_is_up(hdr))
1188 return false; 1364 return false;
1189 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 1365 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1366 return true;
1190 } 1367 }
1191 1368
1369 if (state == SELF_LEAVING_PEER_DOWN)
1370 return false;
1371
1192 /* Ignore duplicate packets */ 1372 /* Ignore duplicate packets */
1193 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt)) 1373 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1194 return true; 1374 return true;
@@ -1197,9 +1377,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1197 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) { 1377 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1198 syncpt = oseqno + exp_pkts - 1; 1378 syncpt = oseqno + exp_pkts - 1;
1199 if (pl && tipc_link_is_up(pl)) { 1379 if (pl && tipc_link_is_up(pl)) {
1200 pb_id = pl->bearer_id;
1201 __tipc_node_link_down(n, &pb_id, xmitq, &maddr); 1380 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1202 tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq); 1381 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1382 tipc_link_inputq(l));
1203 } 1383 }
1204 /* If pkts arrive out of order, use lowest calculated syncpt */ 1384 /* If pkts arrive out of order, use lowest calculated syncpt */
1205 if (less(syncpt, n->sync_point)) 1385 if (less(syncpt, n->sync_point))
@@ -1232,19 +1412,18 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1232 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT); 1412 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1233 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); 1413 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1234 } 1414 }
1235 if (less(syncpt, n->sync_point))
1236 n->sync_point = syncpt;
1237 } 1415 }
1238 1416
1239 /* Open tunnel link when parallel link reaches synch point */ 1417 /* Open tunnel link when parallel link reaches synch point */
1240 if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) { 1418 if (n->state == NODE_SYNCHING) {
1241 if (tipc_link_is_synching(l)) { 1419 if (tipc_link_is_synching(l)) {
1242 tnl = l; 1420 tnl = l;
1243 } else { 1421 } else {
1244 tnl = pl; 1422 tnl = pl;
1245 pl = l; 1423 pl = l;
1246 } 1424 }
1247 dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq)); 1425 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1426 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
1248 if (more(dlv_nxt, n->sync_point)) { 1427 if (more(dlv_nxt, n->sync_point)) {
1249 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 1428 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1250 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 1429 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
@@ -1304,22 +1483,32 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1304 /* Ensure broadcast reception is in synch with peer's send state */ 1483 /* Ensure broadcast reception is in synch with peer's send state */
1305 if (unlikely(usr == LINK_PROTOCOL)) 1484 if (unlikely(usr == LINK_PROTOCOL))
1306 tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr); 1485 tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
1307 else if (unlikely(n->bc_entry.link->acked != bc_ack)) 1486 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
1308 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack); 1487 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
1309 1488
1310 tipc_node_lock(n); 1489 /* Receive packet directly if conditions permit */
1311 1490 tipc_node_read_lock(n);
1312 /* Is reception permitted at the moment ? */ 1491 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
1313 if (!tipc_node_filter_pkt(n, hdr)) 1492 spin_lock_bh(&le->lock);
1314 goto unlock; 1493 if (le->link) {
1315 1494 rc = tipc_link_rcv(le->link, skb, &xmitq);
1316 /* Check and if necessary update node state */ 1495 skb = NULL;
1317 if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) { 1496 }
1318 rc = tipc_link_rcv(le->link, skb, &xmitq); 1497 spin_unlock_bh(&le->lock);
1319 skb = NULL; 1498 }
1499 tipc_node_read_unlock(n);
1500
1501 /* Check/update node state before receiving */
1502 if (unlikely(skb)) {
1503 tipc_node_write_lock(n);
1504 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
1505 if (le->link) {
1506 rc = tipc_link_rcv(le->link, skb, &xmitq);
1507 skb = NULL;
1508 }
1509 }
1510 tipc_node_write_unlock(n);
1320 } 1511 }
1321unlock:
1322 tipc_node_unlock(n);
1323 1512
1324 if (unlikely(rc & TIPC_LINK_UP_EVT)) 1513 if (unlikely(rc & TIPC_LINK_UP_EVT))
1325 tipc_node_link_up(n, bearer_id, &xmitq); 1514 tipc_node_link_up(n, bearer_id, &xmitq);
@@ -1384,15 +1573,15 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
1384 continue; 1573 continue;
1385 } 1574 }
1386 1575
1387 tipc_node_lock(node); 1576 tipc_node_read_lock(node);
1388 err = __tipc_nl_add_node(&msg, node); 1577 err = __tipc_nl_add_node(&msg, node);
1389 if (err) { 1578 if (err) {
1390 last_addr = node->addr; 1579 last_addr = node->addr;
1391 tipc_node_unlock(node); 1580 tipc_node_read_unlock(node);
1392 goto out; 1581 goto out;
1393 } 1582 }
1394 1583
1395 tipc_node_unlock(node); 1584 tipc_node_read_unlock(node);
1396 } 1585 }
1397 done = 1; 1586 done = 1;
1398out: 1587out:
@@ -1402,3 +1591,314 @@ out:
1402 1591
1403 return skb->len; 1592 return skb->len;
1404} 1593}
1594
1595/* tipc_node_find_by_name - locate owner node of link by link's name
1596 * @net: the applicable net namespace
1597 * @name: pointer to link name string
1598 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1599 *
 1600 * Returns pointer to node owning the link, or NULL if no matching link is found.
1601 */
1602static struct tipc_node *tipc_node_find_by_name(struct net *net,
1603 const char *link_name,
1604 unsigned int *bearer_id)
1605{
1606 struct tipc_net *tn = net_generic(net, tipc_net_id);
1607 struct tipc_link *l;
1608 struct tipc_node *n;
1609 struct tipc_node *found_node = NULL;
1610 int i;
1611
1612 *bearer_id = 0;
1613 rcu_read_lock();
1614 list_for_each_entry_rcu(n, &tn->node_list, list) {
1615 tipc_node_read_lock(n);
1616 for (i = 0; i < MAX_BEARERS; i++) {
1617 l = n->links[i].link;
1618 if (l && !strcmp(tipc_link_name(l), link_name)) {
1619 *bearer_id = i;
1620 found_node = n;
1621 break;
1622 }
1623 }
1624 tipc_node_read_unlock(n);
1625 if (found_node)
1626 break;
1627 }
1628 rcu_read_unlock();
1629
1630 return found_node;
1631}
1632
1633int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
1634{
1635 int err;
1636 int res = 0;
1637 int bearer_id;
1638 char *name;
1639 struct tipc_link *link;
1640 struct tipc_node *node;
1641 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1642 struct net *net = sock_net(skb->sk);
1643
1644 if (!info->attrs[TIPC_NLA_LINK])
1645 return -EINVAL;
1646
1647 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1648 info->attrs[TIPC_NLA_LINK],
1649 tipc_nl_link_policy);
1650 if (err)
1651 return err;
1652
1653 if (!attrs[TIPC_NLA_LINK_NAME])
1654 return -EINVAL;
1655
1656 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1657
1658 if (strcmp(name, tipc_bclink_name) == 0)
1659 return tipc_nl_bc_link_set(net, attrs);
1660
1661 node = tipc_node_find_by_name(net, name, &bearer_id);
1662 if (!node)
1663 return -EINVAL;
1664
1665 tipc_node_read_lock(node);
1666
1667 link = node->links[bearer_id].link;
1668 if (!link) {
1669 res = -EINVAL;
1670 goto out;
1671 }
1672
1673 if (attrs[TIPC_NLA_LINK_PROP]) {
1674 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1675
1676 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1677 props);
1678 if (err) {
1679 res = err;
1680 goto out;
1681 }
1682
1683 if (props[TIPC_NLA_PROP_TOL]) {
1684 u32 tol;
1685
1686 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1687 tipc_link_set_tolerance(link, tol);
1688 }
1689 if (props[TIPC_NLA_PROP_PRIO]) {
1690 u32 prio;
1691
1692 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1693 tipc_link_set_prio(link, prio);
1694 }
1695 if (props[TIPC_NLA_PROP_WIN]) {
1696 u32 win;
1697
1698 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1699 tipc_link_set_queue_limits(link, win);
1700 }
1701 }
1702
1703out:
1704 tipc_node_read_unlock(node);
1705
1706 return res;
1707}
1708
1709int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1710{
1711 struct net *net = genl_info_net(info);
1712 struct tipc_nl_msg msg;
1713 char *name;
1714 int err;
1715
1716 msg.portid = info->snd_portid;
1717 msg.seq = info->snd_seq;
1718
1719 if (!info->attrs[TIPC_NLA_LINK_NAME])
1720 return -EINVAL;
1721 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1722
1723 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1724 if (!msg.skb)
1725 return -ENOMEM;
1726
1727 if (strcmp(name, tipc_bclink_name) == 0) {
1728 err = tipc_nl_add_bc_link(net, &msg);
1729 if (err) {
1730 nlmsg_free(msg.skb);
1731 return err;
1732 }
1733 } else {
1734 int bearer_id;
1735 struct tipc_node *node;
1736 struct tipc_link *link;
1737
1738 node = tipc_node_find_by_name(net, name, &bearer_id);
1739 if (!node)
1740 return -EINVAL;
1741
1742 tipc_node_read_lock(node);
1743 link = node->links[bearer_id].link;
1744 if (!link) {
1745 tipc_node_read_unlock(node);
1746 nlmsg_free(msg.skb);
1747 return -EINVAL;
1748 }
1749
1750 err = __tipc_nl_add_link(net, &msg, link, 0);
1751 tipc_node_read_unlock(node);
1752 if (err) {
1753 nlmsg_free(msg.skb);
1754 return err;
1755 }
1756 }
1757
1758 return genlmsg_reply(msg.skb, info);
1759}
1760
1761int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
1762{
1763 int err;
1764 char *link_name;
1765 unsigned int bearer_id;
1766 struct tipc_link *link;
1767 struct tipc_node *node;
1768 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1769 struct net *net = sock_net(skb->sk);
1770 struct tipc_link_entry *le;
1771
1772 if (!info->attrs[TIPC_NLA_LINK])
1773 return -EINVAL;
1774
1775 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1776 info->attrs[TIPC_NLA_LINK],
1777 tipc_nl_link_policy);
1778 if (err)
1779 return err;
1780
1781 if (!attrs[TIPC_NLA_LINK_NAME])
1782 return -EINVAL;
1783
1784 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1785
1786 if (strcmp(link_name, tipc_bclink_name) == 0) {
1787 err = tipc_bclink_reset_stats(net);
1788 if (err)
1789 return err;
1790 return 0;
1791 }
1792
1793 node = tipc_node_find_by_name(net, link_name, &bearer_id);
1794 if (!node)
1795 return -EINVAL;
1796
1797 le = &node->links[bearer_id];
1798 tipc_node_read_lock(node);
1799 spin_lock_bh(&le->lock);
1800 link = node->links[bearer_id].link;
1801 if (!link) {
1802 spin_unlock_bh(&le->lock);
1803 tipc_node_read_unlock(node);
1804 return -EINVAL;
1805 }
1806 tipc_link_reset_stats(link);
1807 spin_unlock_bh(&le->lock);
1808 tipc_node_read_unlock(node);
1809 return 0;
1810}
1811
1812/* Caller should hold node lock */
1813static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1814 struct tipc_node *node, u32 *prev_link)
1815{
1816 u32 i;
1817 int err;
1818
1819 for (i = *prev_link; i < MAX_BEARERS; i++) {
1820 *prev_link = i;
1821
1822 if (!node->links[i].link)
1823 continue;
1824
1825 err = __tipc_nl_add_link(net, msg,
1826 node->links[i].link, NLM_F_MULTI);
1827 if (err)
1828 return err;
1829 }
1830 *prev_link = 0;
1831
1832 return 0;
1833}
1834
1835int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
1836{
1837 struct net *net = sock_net(skb->sk);
1838 struct tipc_net *tn = net_generic(net, tipc_net_id);
1839 struct tipc_node *node;
1840 struct tipc_nl_msg msg;
1841 u32 prev_node = cb->args[0];
1842 u32 prev_link = cb->args[1];
1843 int done = cb->args[2];
1844 int err;
1845
1846 if (done)
1847 return 0;
1848
1849 msg.skb = skb;
1850 msg.portid = NETLINK_CB(cb->skb).portid;
1851 msg.seq = cb->nlh->nlmsg_seq;
1852
1853 rcu_read_lock();
1854 if (prev_node) {
1855 node = tipc_node_find(net, prev_node);
1856 if (!node) {
 1857 /* We never set seq or call nl_dump_check_consistent(),
 1858 * so setting prev_seq here will cause the consistency
 1859 * check to fail in the netlink callback handler,
 1860 * resulting in the last NLMSG_DONE message having
 1861 * the NLM_F_DUMP_INTR flag set.
1862 */
1863 cb->prev_seq = 1;
1864 goto out;
1865 }
1866 tipc_node_put(node);
1867
1868 list_for_each_entry_continue_rcu(node, &tn->node_list,
1869 list) {
1870 tipc_node_read_lock(node);
1871 err = __tipc_nl_add_node_links(net, &msg, node,
1872 &prev_link);
1873 tipc_node_read_unlock(node);
1874 if (err)
1875 goto out;
1876
1877 prev_node = node->addr;
1878 }
1879 } else {
1880 err = tipc_nl_add_bc_link(net, &msg);
1881 if (err)
1882 goto out;
1883
1884 list_for_each_entry_rcu(node, &tn->node_list, list) {
1885 tipc_node_read_lock(node);
1886 err = __tipc_nl_add_node_links(net, &msg, node,
1887 &prev_link);
1888 tipc_node_read_unlock(node);
1889 if (err)
1890 goto out;
1891
1892 prev_node = node->addr;
1893 }
1894 }
1895 done = 1;
1896out:
1897 rcu_read_unlock();
1898
1899 cb->args[0] = prev_node;
1900 cb->args[1] = prev_link;
1901 cb->args[2] = done;
1902
1903 return skb->len;
1904}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 6734562d3c6e..f39d9d06e8bb 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,23 +42,6 @@
42#include "bearer.h" 42#include "bearer.h"
43#include "msg.h" 43#include "msg.h"
44 44
45/* Out-of-range value for node signature */
46#define INVALID_NODE_SIG 0x10000
47
48#define INVALID_BEARER_ID -1
49
50/* Flags used to take different actions according to flag type
51 * TIPC_NOTIFY_NODE_DOWN: notify node is down
52 * TIPC_NOTIFY_NODE_UP: notify node is up
53 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
54 */
55enum {
56 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
57 TIPC_NOTIFY_NODE_UP = (1 << 4),
58 TIPC_NOTIFY_LINK_UP = (1 << 6),
59 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
60};
61
62/* Optional capabilities supported by this code version 45/* Optional capabilities supported by this code version
63 */ 46 */
64enum { 47enum {
@@ -66,72 +49,8 @@ enum {
66}; 49};
67 50
68#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH 51#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
52#define INVALID_BEARER_ID -1
69 53
70struct tipc_link_entry {
71 struct tipc_link *link;
72 u32 mtu;
73 struct sk_buff_head inputq;
74 struct tipc_media_addr maddr;
75};
76
77struct tipc_bclink_entry {
78 struct tipc_link *link;
79 struct sk_buff_head inputq1;
80 struct sk_buff_head arrvq;
81 struct sk_buff_head inputq2;
82 struct sk_buff_head namedq;
83};
84
85/**
86 * struct tipc_node - TIPC node structure
87 * @addr: network address of node
88 * @ref: reference counter to node object
89 * @lock: spinlock governing access to structure
90 * @net: the applicable net namespace
91 * @hash: links to adjacent nodes in unsorted hash chain
92 * @inputq: pointer to input queue containing messages for msg event
93 * @namedq: pointer to name table input queue with name table messages
94 * @active_links: bearer ids of active links, used as index into links[] array
95 * @links: array containing references to all links to node
96 * @action_flags: bit mask of different types of node actions
97 * @state: connectivity state vs peer node
98 * @sync_point: sequence number where synch/failover is finished
99 * @list: links to adjacent nodes in sorted list of cluster's nodes
100 * @working_links: number of working links to node (both active and standby)
101 * @link_cnt: number of links to node
102 * @capabilities: bitmap, indicating peer node's functional capabilities
103 * @signature: node instance identifier
104 * @link_id: local and remote bearer ids of changing link, if any
105 * @publ_list: list of publications
106 * @rcu: rcu struct for tipc_node
107 */
108struct tipc_node {
109 u32 addr;
110 struct kref kref;
111 spinlock_t lock;
112 struct net *net;
113 struct hlist_node hash;
114 int active_links[2];
115 struct tipc_link_entry links[MAX_BEARERS];
116 struct tipc_bclink_entry bc_entry;
117 int action_flags;
118 struct list_head list;
119 int state;
120 u16 sync_point;
121 int link_cnt;
122 u16 working_links;
123 u16 capabilities;
124 u32 signature;
125 u32 link_id;
126 struct list_head publ_list;
127 struct list_head conn_sks;
128 unsigned long keepalive_intv;
129 struct timer_list timer;
130 struct rcu_head rcu;
131};
132
133struct tipc_node *tipc_node_find(struct net *net, u32 addr);
134void tipc_node_put(struct tipc_node *node);
135void tipc_node_stop(struct net *net); 54void tipc_node_stop(struct net *net);
136void tipc_node_check_dest(struct net *net, u32 onode, 55void tipc_node_check_dest(struct net *net, u32 onode,
137 struct tipc_bearer *bearer, 56 struct tipc_bearer *bearer,
@@ -139,50 +58,22 @@ void tipc_node_check_dest(struct net *net, u32 onode,
139 struct tipc_media_addr *maddr, 58 struct tipc_media_addr *maddr,
140 bool *respond, bool *dupl_addr); 59 bool *respond, bool *dupl_addr);
141void tipc_node_delete_links(struct net *net, int bearer_id); 60void tipc_node_delete_links(struct net *net, int bearer_id);
142void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
143void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
144bool tipc_node_is_up(struct tipc_node *n);
145int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node, 61int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
146 char *linkname, size_t len); 62 char *linkname, size_t len);
147void tipc_node_unlock(struct tipc_node *node);
148int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode, 63int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
149 int selector); 64 int selector);
150int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, 65int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
151 u32 selector); 66 u32 selector);
67void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr);
68void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
69void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
152int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); 70int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
153void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); 71void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
72int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
154int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); 73int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
155 74int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
156static inline void tipc_node_lock(struct tipc_node *node) 75int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
157{ 76int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
158 spin_lock_bh(&node->lock); 77int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);
159}
160
161static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
162{
163 int bearer_id = n->active_links[sel & 1];
164
165 if (unlikely(bearer_id == INVALID_BEARER_ID))
166 return NULL;
167
168 return n->links[bearer_id].link;
169}
170
171static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
172{
173 struct tipc_node *n;
174 int bearer_id;
175 unsigned int mtu = MAX_MSG_SIZE;
176
177 n = tipc_node_find(net, addr);
178 if (unlikely(!n))
179 return mtu;
180
181 bearer_id = n->active_links[sel & 1];
182 if (likely(bearer_id != INVALID_BEARER_ID))
183 mtu = n->links[bearer_id].mtu;
184 tipc_node_put(n);
185 return mtu;
186}
187 78
188#endif 79#endif
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ad2719ad4c1b..816914ef228d 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -48,7 +48,6 @@
48#include <linux/tipc_netlink.h> 48#include <linux/tipc_netlink.h>
49#include "core.h" 49#include "core.h"
50#include "bearer.h" 50#include "bearer.h"
51#include "msg.h"
52 51
53/* IANA assigned UDP port */ 52/* IANA assigned UDP port */
54#define UDP_PORT_DEFAULT 6118 53#define UDP_PORT_DEFAULT 6118
@@ -221,10 +220,6 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
221{ 220{
222 struct udp_bearer *ub; 221 struct udp_bearer *ub;
223 struct tipc_bearer *b; 222 struct tipc_bearer *b;
224 int usr = msg_user(buf_msg(skb));
225
226 if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR))
227 skb_linearize(skb);
228 223
229 ub = rcu_dereference_sk_user_data(sk); 224 ub = rcu_dereference_sk_user_data(sk);
230 if (!ub) { 225 if (!ub) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58d6aba..955ec152cb71 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -441,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
441 if (state == TCP_LISTEN) 441 if (state == TCP_LISTEN)
442 unix_release_sock(skb->sk, 1); 442 unix_release_sock(skb->sk, 1);
443 /* passed fds are erased in the kfree_skb hook */ 443 /* passed fds are erased in the kfree_skb hook */
444 UNIXCB(skb).consumed = skb->len;
444 kfree_skb(skb); 445 kfree_skb(skb);
445 } 446 }
446 447
@@ -1799,6 +1800,7 @@ alloc_skb:
1799 * this - does no harm 1800 * this - does no harm
1800 */ 1801 */
1801 consume_skb(newskb); 1802 consume_skb(newskb);
1803 newskb = NULL;
1802 } 1804 }
1803 1805
1804 if (skb_append_pagefrags(skb, page, offset, size)) { 1806 if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,8 +1813,11 @@ alloc_skb:
1811 skb->truesize += size; 1813 skb->truesize += size;
1812 atomic_add(size, &sk->sk_wmem_alloc); 1814 atomic_add(size, &sk->sk_wmem_alloc);
1813 1815
1814 if (newskb) 1816 if (newskb) {
1817 spin_lock(&other->sk_receive_queue.lock);
1815 __skb_queue_tail(&other->sk_receive_queue, newskb); 1818 __skb_queue_tail(&other->sk_receive_queue, newskb);
1819 spin_unlock(&other->sk_receive_queue.lock);
1820 }
1816 1821
1817 unix_state_unlock(other); 1822 unix_state_unlock(other);
1818 mutex_unlock(&unix_sk(other)->readlock); 1823 mutex_unlock(&unix_sk(other)->readlock);
@@ -2072,6 +2077,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2072 2077
2073 do { 2078 do {
2074 int chunk; 2079 int chunk;
2080 bool drop_skb;
2075 struct sk_buff *skb, *last; 2081 struct sk_buff *skb, *last;
2076 2082
2077 unix_state_lock(sk); 2083 unix_state_lock(sk);
@@ -2152,7 +2158,11 @@ unlock:
2152 } 2158 }
2153 2159
2154 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); 2160 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2161 skb_get(skb);
2155 chunk = state->recv_actor(skb, skip, chunk, state); 2162 chunk = state->recv_actor(skb, skip, chunk, state);
2163 drop_skb = !unix_skb_len(skb);
2164 /* skb is only safe to use if !drop_skb */
2165 consume_skb(skb);
2156 if (chunk < 0) { 2166 if (chunk < 0) {
2157 if (copied == 0) 2167 if (copied == 0)
2158 copied = -EFAULT; 2168 copied = -EFAULT;
@@ -2161,6 +2171,18 @@ unlock:
2161 copied += chunk; 2171 copied += chunk;
2162 size -= chunk; 2172 size -= chunk;
2163 2173
2174 if (drop_skb) {
 2175 /* the skb was touched by a concurrent reader;
 2176 * we should not expect anything from this skb
 2177 * anymore and must treat it as invalid - we can
 2178 * be sure it was dropped from the socket queue
 2179 *
 2180 * let's report a short read
2181 */
2182 err = 0;
2183 break;
2184 }
2185
2164 /* Mark read part of skb as used */ 2186 /* Mark read part of skb as used */
2165 if (!(flags & MSG_PEEK)) { 2187 if (!(flags & MSG_PEEK)) {
2166 UNIXCB(skb).consumed += chunk; 2188 UNIXCB(skb).consumed += chunk;
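The skb_get()/consume_skb() pair added in the hunk above pins the buffer across the recv_actor() callback, during which the socket state lock may be dropped; a concurrent reader can meanwhile consume the skb's remaining payload, which is detected afterwards via unix_skb_len(). The guard reduces to this shape (recv_actor stands in for the read/splice callback):

	skb_get(skb);			/* extra ref: skb cannot be freed under us */
	chunk = state->recv_actor(skb, skip, chunk, state);
	drop_skb = !unix_skb_len(skb);	/* fully consumed by a concurrent reader? */
	consume_skb(skb);		/* drop our extra reference */
	if (drop_skb)
		break;			/* skb is gone: report a short read */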
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index 2ad46f39649f..1820e74a5752 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -121,7 +121,7 @@ struct vmci_transport {
121 u64 queue_pair_max_size; 121 u64 queue_pair_max_size;
122 u32 detach_sub_id; 122 u32 detach_sub_id;
123 union vmci_transport_notify notify; 123 union vmci_transport_notify notify;
124 struct vmci_transport_notify_ops *notify_ops; 124 const struct vmci_transport_notify_ops *notify_ops;
125 struct list_head elem; 125 struct list_head elem;
126 struct sock *sk; 126 struct sock *sk;
127 spinlock_t lock; /* protects sk. */ 127 spinlock_t lock; /* protects sk. */
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index 9b7f207f2bee..fd8cf0214d51 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -661,7 +661,7 @@ static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
661} 661}
662 662
663/* Socket control packet based operations. */ 663/* Socket control packet based operations. */
664struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = { 664const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
665 vmci_transport_notify_pkt_socket_init, 665 vmci_transport_notify_pkt_socket_init,
666 vmci_transport_notify_pkt_socket_destruct, 666 vmci_transport_notify_pkt_socket_destruct,
667 vmci_transport_notify_pkt_poll_in, 667 vmci_transport_notify_pkt_poll_in,
diff --git a/net/vmw_vsock/vmci_transport_notify.h b/net/vmw_vsock/vmci_transport_notify.h
index 7df793249b6c..3c464d394a8f 100644
--- a/net/vmw_vsock/vmci_transport_notify.h
+++ b/net/vmw_vsock/vmci_transport_notify.h
@@ -77,7 +77,8 @@ struct vmci_transport_notify_ops {
77 void (*process_negotiate) (struct sock *sk); 77 void (*process_negotiate) (struct sock *sk);
78}; 78};
79 79
80extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops; 80extern const struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
81extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops; 81extern const
82struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;
82 83
83#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */ 84#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index dc9c7929a2f9..21e591dafb03 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -419,7 +419,7 @@ vmci_transport_notify_pkt_send_pre_enqueue(
419} 419}
420 420
421/* Socket always on control packet based operations. */ 421/* Socket always on control packet based operations. */
422struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = { 422const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
423 vmci_transport_notify_pkt_socket_init, 423 vmci_transport_notify_pkt_socket_init,
424 vmci_transport_notify_pkt_socket_destruct, 424 vmci_transport_notify_pkt_socket_destruct,
425 vmci_transport_notify_pkt_poll_in, 425 vmci_transport_notify_pkt_poll_in,