aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c28
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_core.c85
-rw-r--r--net/8021q/vlan_dev.c223
-rw-r--r--net/9p/client.c9
-rw-r--r--net/Kconfig14
-rw-r--r--net/atm/lec.c4
-rw-r--r--net/ax25/af_ax25.c16
-rw-r--r--net/ax25/ax25_iface.c3
-rw-r--r--net/batman-adv/aggregation.c1
-rw-r--r--net/batman-adv/gateway_client.c296
-rw-r--r--net/batman-adv/gateway_client.h2
-rw-r--r--net/batman-adv/hard-interface.c83
-rw-r--r--net/batman-adv/hard-interface.h18
-rw-r--r--net/batman-adv/icmp_socket.c37
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/originator.c72
-rw-r--r--net/batman-adv/originator.h1
-rw-r--r--net/batman-adv/routing.c412
-rw-r--r--net/batman-adv/send.c37
-rw-r--r--net/batman-adv/soft-interface.c190
-rw-r--r--net/batman-adv/translation-table.c57
-rw-r--r--net/batman-adv/types.h9
-rw-r--r--net/batman-adv/unicast.c18
-rw-r--r--net/batman-adv/vis.c128
-rw-r--r--net/bluetooth/bnep/bnep.h148
-rw-r--r--net/bluetooth/bnep/core.c71
-rw-r--r--net/bluetooth/bnep/sock.c2
-rw-r--r--net/bluetooth/cmtp/capi.c6
-rw-r--r--net/bluetooth/cmtp/cmtp.h11
-rw-r--r--net/bluetooth/cmtp/core.c28
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_conn.c78
-rw-r--r--net/bluetooth/hci_core.c154
-rw-r--r--net/bluetooth/hci_event.c262
-rw-r--r--net/bluetooth/hci_sysfs.c71
-rw-r--r--net/bluetooth/hidp/core.c96
-rw-r--r--net/bluetooth/hidp/hidp.h6
-rw-r--r--net/bluetooth/hidp/sock.c7
-rw-r--r--net/bluetooth/l2cap_core.c1817
-rw-r--r--net/bluetooth/l2cap_sock.c288
-rw-r--r--net/bluetooth/mgmt.c609
-rw-r--r--net/bluetooth/rfcomm/core.c21
-rw-r--r--net/bluetooth/rfcomm/sock.c5
-rw-r--r--net/bridge/br.c1
-rw-r--r--net/bridge/br_device.c100
-rw-r--r--net/bridge/br_fdb.c311
-rw-r--r--net/bridge/br_if.c104
-rw-r--r--net/bridge/br_input.c5
-rw-r--r--net/bridge/br_ioctl.c40
-rw-r--r--net/bridge/br_multicast.c12
-rw-r--r--net/bridge/br_netfilter.c4
-rw-r--r--net/bridge/br_netlink.c60
-rw-r--r--net/bridge/br_notify.c11
-rw-r--r--net/bridge/br_private.h22
-rw-r--r--net/bridge/br_private_stp.h13
-rw-r--r--net/bridge/br_stp.c48
-rw-r--r--net/bridge/br_stp_if.c21
-rw-r--r--net/bridge/br_sysfs_br.c39
-rw-r--r--net/bridge/br_sysfs_if.c26
-rw-r--r--net/caif/caif_config_util.c6
-rw-r--r--net/caif/caif_dev.c56
-rw-r--r--net/caif/caif_socket.c35
-rw-r--r--net/caif/cfcnfg.c2
-rw-r--r--net/caif/cfctrl.c75
-rw-r--r--net/caif/cfdgml.c7
-rw-r--r--net/caif/cffrml.c8
-rw-r--r--net/caif/cfmuxl.c45
-rw-r--r--net/caif/cfpkt_skbuff.c178
-rw-r--r--net/caif/cfserl.c7
-rw-r--r--net/caif/cfsrvl.c7
-rw-r--r--net/caif/cfutill.c7
-rw-r--r--net/caif/cfveil.c5
-rw-r--r--net/caif/cfvidl.c5
-rw-r--r--net/can/af_can.c67
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/raw.c2
-rw-r--r--net/compat.c16
-rw-r--r--net/core/dev.c152
-rw-r--r--net/core/dst.c34
-rw-r--r--net/core/ethtool.c106
-rw-r--r--net/core/fib_rules.c3
-rw-r--r--net/core/filter.c65
-rw-r--r--net/core/net-sysfs.c24
-rw-r--r--net/core/net_namespace.c12
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c37
-rw-r--r--net/core/rtnetlink.c20
-rw-r--r--net/core/sysctl_net_core.c9
-rw-r--r--net/dccp/ipv4.c30
-rw-r--r--net/dccp/ipv6.c10
-rw-r--r--net/decnet/dn_dev.c10
-rw-r--r--net/decnet/dn_route.c15
-rw-r--r--net/decnet/dn_table.c4
-rw-r--r--net/dsa/slave.c1
-rw-r--r--net/econet/af_econet.c8
-rw-r--r--net/ipv4/af_inet.c29
-rw-r--r--net/ipv4/ah4.c7
-rw-r--r--net/ipv4/cipso_ipv4.c113
-rw-r--r--net/ipv4/datagram.c9
-rw-r--r--net/ipv4/esp4.c7
-rw-r--r--net/ipv4/fib_frontend.c16
-rw-r--r--net/ipv4/fib_trie.c110
-rw-r--r--net/ipv4/icmp.c35
-rw-r--r--net/ipv4/igmp.c22
-rw-r--r--net/ipv4/inet_connection_sock.c22
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inet_lro.c4
-rw-r--r--net/ipv4/ip_gre.c70
-rw-r--r--net/ipv4/ip_input.c4
-rw-r--r--net/ipv4/ip_options.c38
-rw-r--r--net/ipv4/ip_output.c69
-rw-r--r--net/ipv4/ip_sockglue.c37
-rw-r--r--net/ipv4/ipcomp.c4
-rw-r--r--net/ipv4/ipip.c36
-rw-r--r--net/ipv4/ipmr.c39
-rw-r--r--net/ipv4/netfilter/arp_tables.c18
-rw-r--r--net/ipv4/netfilter/ip_tables.c28
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c2
-rw-r--r--net/ipv4/raw.c47
-rw-r--r--net/ipv4/route.c323
-rw-r--r--net/ipv4/syncookies.c22
-rw-r--r--net/ipv4/tcp.c7
-rw-r--r--net/ipv4/tcp_ipv4.c58
-rw-r--r--net/ipv4/udp.c41
-rw-r--r--net/ipv4/xfrm4_policy.c4
-rw-r--r--net/ipv4/xfrm4_state.c2
-rw-r--r--net/ipv6/addrconf.c18
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/anycast.c16
-rw-r--r--net/ipv6/esp6.c5
-rw-r--r--net/ipv6/icmp.c8
-rw-r--r--net/ipv6/ip6_fib.c20
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_output.c16
-rw-r--r--net/ipv6/ip6_tunnel.c46
-rw-r--r--net/ipv6/ip6mr.c4
-rw-r--r--net/ipv6/ipcomp6.c5
-rw-r--r--net/ipv6/mcast.c36
-rw-r--r--net/ipv6/mip6.c8
-rw-r--r--net/ipv6/ndisc.c51
-rw-r--r--net/ipv6/netfilter.c10
-rw-r--r--net/ipv6/netfilter/ip6_tables.c21
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c3
-rw-r--r--net/ipv6/raw.c14
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c157
-rw-r--r--net/ipv6/sit.c40
-rw-r--r--net/ipv6/syncookies.c13
-rw-r--r--net/ipv6/tcp_ipv6.c50
-rw-r--r--net/ipv6/udp.c22
-rw-r--r--net/ipv6/xfrm6_mode_beet.c2
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c6
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c10
-rw-r--r--net/irda/irlap_event.c3
-rw-r--r--net/irda/irproc.c5
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_ip.c29
-rw-r--r--net/l2tp/l2tp_netlink.c3
-rw-r--r--net/mac80211/Kconfig1
-rw-r--r--net/mac80211/aes_ccm.c6
-rw-r--r--net/mac80211/cfg.c55
-rw-r--r--net/mac80211/debugfs.c89
-rw-r--r--net/mac80211/debugfs_sta.c26
-rw-r--r--net/mac80211/driver-ops.h31
-rw-r--r--net/mac80211/driver-trace.h47
-rw-r--r--net/mac80211/ibss.c10
-rw-r--r--net/mac80211/ieee80211_i.h12
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c21
-rw-r--r--net/mac80211/main.c32
-rw-r--r--net/mac80211/mesh.c17
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_pathtbl.c49
-rw-r--r--net/mac80211/mesh_plink.c35
-rw-r--r--net/mac80211/mlme.c26
-rw-r--r--net/mac80211/pm.c16
-rw-r--r--net/mac80211/rx.c77
-rw-r--r--net/mac80211/sta_info.c33
-rw-r--r--net/mac80211/sta_info.h6
-rw-r--r--net/mac80211/status.c19
-rw-r--r--net/mac80211/tkip.c4
-rw-r--r--net/mac80211/tkip.h4
-rw-r--r--net/mac80211/tx.c17
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/mac80211/wep.c34
-rw-r--r--net/mac80211/wep.h4
-rw-r--r--net/mac80211/work.c6
-rw-r--r--net/mac80211/wpa.c62
-rw-r--r--net/netfilter/ipset/ip_set_getport.c16
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/nf_conntrack_standalone.c2
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/x_tables.c9
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netrom/af_netrom.c12
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/phonet/pn_dev.c6
-rw-r--r--net/phonet/pn_netlink.c4
-rw-r--r--net/phonet/socket.c45
-rw-r--r--net/rfkill/Kconfig11
-rw-r--r--net/rfkill/Makefile1
-rw-r--r--net/rfkill/rfkill-regulator.c164
-rw-r--r--net/rose/af_rose.c16
-rw-r--r--net/rxrpc/ar-peer.c3
-rw-r--r--net/sched/Kconfig11
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/sch_qfq.c1137
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sctp/debug.c1
-rw-r--r--net/sctp/endpointola.c20
-rw-r--r--net/sctp/input.c19
-rw-r--r--net/sctp/ipv6.c185
-rw-r--r--net/sctp/outqueue.c19
-rw-r--r--net/sctp/protocol.c63
-rw-r--r--net/sctp/sm_make_chunk.c62
-rw-r--r--net/sctp/sm_sideeffect.c9
-rw-r--r--net/sctp/sm_statefuns.c89
-rw-r--r--net/sctp/sm_statetable.c78
-rw-r--r--net/sctp/socket.c82
-rw-r--r--net/sctp/transport.c28
-rw-r--r--net/sctp/ulpevent.c30
-rw-r--r--net/socket.c213
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/mesh.c23
-rw-r--r--net/wireless/mlme.c9
-rw-r--r--net/wireless/nl80211.c122
-rw-r--r--net/wireless/nl80211.h7
-rw-r--r--net/wireless/reg.c72
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_state.c12
237 files changed, 7903 insertions, 4651 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 7850412f52b7..969e7004cf86 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -49,11 +49,6 @@ const char vlan_version[] = DRV_VERSION;
49static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; 49static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
50static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; 50static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
51 51
52static struct packet_type vlan_packet_type __read_mostly = {
53 .type = cpu_to_be16(ETH_P_8021Q),
54 .func = vlan_skb_recv, /* VLAN receive method */
55};
56
57/* End of global variables definitions. */ 52/* End of global variables definitions. */
58 53
59static void vlan_group_free(struct vlan_group *grp) 54static void vlan_group_free(struct vlan_group *grp)
@@ -327,10 +322,6 @@ static void vlan_sync_address(struct net_device *dev,
327static void vlan_transfer_features(struct net_device *dev, 322static void vlan_transfer_features(struct net_device *dev,
328 struct net_device *vlandev) 323 struct net_device *vlandev)
329{ 324{
330 u32 old_features = vlandev->features;
331
332 vlandev->features &= ~dev->vlan_features;
333 vlandev->features |= dev->features & dev->vlan_features;
334 vlandev->gso_max_size = dev->gso_max_size; 325 vlandev->gso_max_size = dev->gso_max_size;
335 326
336 if (dev->features & NETIF_F_HW_VLAN_TX) 327 if (dev->features & NETIF_F_HW_VLAN_TX)
@@ -341,8 +332,8 @@ static void vlan_transfer_features(struct net_device *dev,
341#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 332#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
342 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 333 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
343#endif 334#endif
344 if (old_features != vlandev->features) 335
345 netdev_features_change(vlandev); 336 netdev_update_features(vlandev);
346} 337}
347 338
348static void __vlan_device_event(struct net_device *dev, unsigned long event) 339static void __vlan_device_event(struct net_device *dev, unsigned long event)
@@ -508,6 +499,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
508 case NETDEV_PRE_TYPE_CHANGE: 499 case NETDEV_PRE_TYPE_CHANGE:
509 /* Forbid underlaying device to change its type. */ 500 /* Forbid underlaying device to change its type. */
510 return NOTIFY_BAD; 501 return NOTIFY_BAD;
502
503 case NETDEV_NOTIFY_PEERS:
504 case NETDEV_BONDING_FAILOVER:
505 /* Propagate to vlan devices */
506 for (i = 0; i < VLAN_N_VID; i++) {
507 vlandev = vlan_group_get_device(grp, i);
508 if (!vlandev)
509 continue;
510
511 call_netdevice_notifiers(event, vlandev);
512 }
513 break;
511 } 514 }
512 515
513out: 516out:
@@ -688,7 +691,6 @@ static int __init vlan_proto_init(void)
688 if (err < 0) 691 if (err < 0)
689 goto err4; 692 goto err4;
690 693
691 dev_add_pack(&vlan_packet_type);
692 vlan_ioctl_set(vlan_ioctl_handler); 694 vlan_ioctl_set(vlan_ioctl_handler);
693 return 0; 695 return 0;
694 696
@@ -709,8 +711,6 @@ static void __exit vlan_cleanup_module(void)
709 711
710 unregister_netdevice_notifier(&vlan_notifier_block); 712 unregister_netdevice_notifier(&vlan_notifier_block);
711 713
712 dev_remove_pack(&vlan_packet_type);
713
714 unregister_pernet_subsys(&vlan_net_ops); 714 unregister_pernet_subsys(&vlan_net_ops);
715 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 715 rcu_barrier(); /* Wait for completion of call_rcu()'s */
716 716
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5687c9b95f33..c3408def8a19 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -75,8 +75,6 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
75} 75}
76 76
77/* found in vlan_dev.c */ 77/* found in vlan_dev.c */
78int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
79 struct packet_type *ptype, struct net_device *orig_dev);
80void vlan_dev_set_ingress_priority(const struct net_device *dev, 78void vlan_dev_set_ingress_priority(const struct net_device *dev,
81 u32 skb_prio, u16 vlan_prio); 79 u32 skb_prio, u16 vlan_prio);
82int vlan_dev_set_egress_priority(const struct net_device *dev, 80int vlan_dev_set_egress_priority(const struct net_device *dev,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index ce8e3ab3e7a5..41495dc2a4c9 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,7 +4,7 @@
4#include <linux/netpoll.h> 4#include <linux/netpoll.h>
5#include "vlan.h" 5#include "vlan.h"
6 6
7bool vlan_hwaccel_do_receive(struct sk_buff **skbp) 7bool vlan_do_receive(struct sk_buff **skbp)
8{ 8{
9 struct sk_buff *skb = *skbp; 9 struct sk_buff *skb = *skbp;
10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; 10 u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -88,3 +88,86 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
88 return napi_gro_frags(napi); 88 return napi_gro_frags(napi);
89} 89}
90EXPORT_SYMBOL(vlan_gro_frags); 90EXPORT_SYMBOL(vlan_gro_frags);
91
92static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
93{
94 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
95 if (skb_cow(skb, skb_headroom(skb)) < 0)
96 skb = NULL;
97 if (skb) {
98 /* Lifted from Gleb's VLAN code... */
99 memmove(skb->data - ETH_HLEN,
100 skb->data - VLAN_ETH_HLEN, 12);
101 skb->mac_header += VLAN_HLEN;
102 }
103 }
104 return skb;
105}
106
107static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
108{
109 __be16 proto;
110 unsigned char *rawp;
111
112 /*
113 * Was a VLAN packet, grab the encapsulated protocol, which the layer
114 * three protocols care about.
115 */
116
117 proto = vhdr->h_vlan_encapsulated_proto;
118 if (ntohs(proto) >= 1536) {
119 skb->protocol = proto;
120 return;
121 }
122
123 rawp = skb->data;
124 if (*(unsigned short *) rawp == 0xFFFF)
125 /*
126 * This is a magic hack to spot IPX packets. Older Novell
127 * breaks the protocol design and runs IPX over 802.3 without
128 * an 802.2 LLC layer. We look for FFFF which isn't a used
129 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
130 * but does for the rest.
131 */
132 skb->protocol = htons(ETH_P_802_3);
133 else
134 /*
135 * Real 802.2 LLC
136 */
137 skb->protocol = htons(ETH_P_802_2);
138}
139
140struct sk_buff *vlan_untag(struct sk_buff *skb)
141{
142 struct vlan_hdr *vhdr;
143 u16 vlan_tci;
144
145 if (unlikely(vlan_tx_tag_present(skb))) {
146 /* vlan_tci is already set-up so leave this for another time */
147 return skb;
148 }
149
150 skb = skb_share_check(skb, GFP_ATOMIC);
151 if (unlikely(!skb))
152 goto err_free;
153
154 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
155 goto err_free;
156
157 vhdr = (struct vlan_hdr *) skb->data;
158 vlan_tci = ntohs(vhdr->h_vlan_TCI);
159 __vlan_hwaccel_put_tag(skb, vlan_tci);
160
161 skb_pull_rcsum(skb, VLAN_HLEN);
162 vlan_set_encap_proto(skb, vhdr);
163
164 skb = vlan_check_reorder_header(skb);
165 if (unlikely(!skb))
166 goto err_free;
167
168 return skb;
169
170err_free:
171 kfree_skb(skb);
172 return NULL;
173}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e34ea9e5e28b..d174c312b7f1 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -65,179 +65,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
65 return 0; 65 return 0;
66} 66}
67 67
68static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
69{
70 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
71 if (skb_cow(skb, skb_headroom(skb)) < 0)
72 skb = NULL;
73 if (skb) {
74 /* Lifted from Gleb's VLAN code... */
75 memmove(skb->data - ETH_HLEN,
76 skb->data - VLAN_ETH_HLEN, 12);
77 skb->mac_header += VLAN_HLEN;
78 }
79 }
80
81 return skb;
82}
83
84static inline void vlan_set_encap_proto(struct sk_buff *skb,
85 struct vlan_hdr *vhdr)
86{
87 __be16 proto;
88 unsigned char *rawp;
89
90 /*
91 * Was a VLAN packet, grab the encapsulated protocol, which the layer
92 * three protocols care about.
93 */
94
95 proto = vhdr->h_vlan_encapsulated_proto;
96 if (ntohs(proto) >= 1536) {
97 skb->protocol = proto;
98 return;
99 }
100
101 rawp = skb->data;
102 if (*(unsigned short *)rawp == 0xFFFF)
103 /*
104 * This is a magic hack to spot IPX packets. Older Novell
105 * breaks the protocol design and runs IPX over 802.3 without
106 * an 802.2 LLC layer. We look for FFFF which isn't a used
107 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
108 * but does for the rest.
109 */
110 skb->protocol = htons(ETH_P_802_3);
111 else
112 /*
113 * Real 802.2 LLC
114 */
115 skb->protocol = htons(ETH_P_802_2);
116}
117
118/*
119 * Determine the packet's protocol ID. The rule here is that we
120 * assume 802.3 if the type field is short enough to be a length.
121 * This is normal practice and works for any 'now in use' protocol.
122 *
123 * Also, at this point we assume that we ARE dealing exclusively with
124 * VLAN packets, or packets that should be made into VLAN packets based
125 * on a default VLAN ID.
126 *
127 * NOTE: Should be similar to ethernet/eth.c.
128 *
129 * SANITY NOTE: This method is called when a packet is moving up the stack
130 * towards userland. To get here, it would have already passed
131 * through the ethernet/eth.c eth_type_trans() method.
132 * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
133 * stored UNALIGNED in the memory. RISC systems don't like
134 * such cases very much...
135 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
136 * aligned, so there doesn't need to be any of the unaligned
137 * stuff. It has been commented out now... --Ben
138 *
139 */
140int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
141 struct packet_type *ptype, struct net_device *orig_dev)
142{
143 struct vlan_hdr *vhdr;
144 struct vlan_pcpu_stats *rx_stats;
145 struct net_device *vlan_dev;
146 u16 vlan_id;
147 u16 vlan_tci;
148
149 skb = skb_share_check(skb, GFP_ATOMIC);
150 if (skb == NULL)
151 goto err_free;
152
153 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
154 goto err_free;
155
156 vhdr = (struct vlan_hdr *)skb->data;
157 vlan_tci = ntohs(vhdr->h_vlan_TCI);
158 vlan_id = vlan_tci & VLAN_VID_MASK;
159
160 rcu_read_lock();
161 vlan_dev = vlan_find_dev(dev, vlan_id);
162
163 /* If the VLAN device is defined, we use it.
164 * If not, and the VID is 0, it is a 802.1p packet (not
165 * really a VLAN), so we will just netif_rx it later to the
166 * original interface, but with the skb->proto set to the
167 * wrapped proto: we do nothing here.
168 */
169
170 if (!vlan_dev) {
171 if (vlan_id) {
172 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
173 __func__, vlan_id, dev->name);
174 goto err_unlock;
175 }
176 rx_stats = NULL;
177 } else {
178 skb->dev = vlan_dev;
179
180 rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
181
182 u64_stats_update_begin(&rx_stats->syncp);
183 rx_stats->rx_packets++;
184 rx_stats->rx_bytes += skb->len;
185
186 skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
187
188 pr_debug("%s: priority: %u for TCI: %hu\n",
189 __func__, skb->priority, vlan_tci);
190
191 switch (skb->pkt_type) {
192 case PACKET_BROADCAST:
193 /* Yeah, stats collect these together.. */
194 /* stats->broadcast ++; // no such counter :-( */
195 break;
196
197 case PACKET_MULTICAST:
198 rx_stats->rx_multicast++;
199 break;
200
201 case PACKET_OTHERHOST:
202 /* Our lower layer thinks this is not local, let's make
203 * sure.
204 * This allows the VLAN to have a different MAC than the
205 * underlying device, and still route correctly.
206 */
207 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
208 skb->dev->dev_addr))
209 skb->pkt_type = PACKET_HOST;
210 break;
211 default:
212 break;
213 }
214 u64_stats_update_end(&rx_stats->syncp);
215 }
216
217 skb_pull_rcsum(skb, VLAN_HLEN);
218 vlan_set_encap_proto(skb, vhdr);
219
220 if (vlan_dev) {
221 skb = vlan_check_reorder_header(skb);
222 if (!skb) {
223 rx_stats->rx_errors++;
224 goto err_unlock;
225 }
226 }
227
228 netif_rx(skb);
229
230 rcu_read_unlock();
231 return NET_RX_SUCCESS;
232
233err_unlock:
234 rcu_read_unlock();
235err_free:
236 atomic_long_inc(&dev->rx_dropped);
237 kfree_skb(skb);
238 return NET_RX_DROP;
239}
240
241static inline u16 68static inline u16
242vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) 69vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
243{ 70{
@@ -704,8 +531,8 @@ static int vlan_dev_init(struct net_device *dev)
704 (1<<__LINK_STATE_DORMANT))) | 531 (1<<__LINK_STATE_DORMANT))) |
705 (1<<__LINK_STATE_PRESENT); 532 (1<<__LINK_STATE_PRESENT);
706 533
707 dev->features |= real_dev->features & real_dev->vlan_features; 534 dev->hw_features = real_dev->vlan_features & NETIF_F_ALL_TX_OFFLOADS;
708 dev->features |= NETIF_F_LLTX; 535 dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
709 dev->gso_max_size = real_dev->gso_max_size; 536 dev->gso_max_size = real_dev->gso_max_size;
710 537
711 /* ipv6 shared card related stuff */ 538 /* ipv6 shared card related stuff */
@@ -759,6 +586,17 @@ static void vlan_dev_uninit(struct net_device *dev)
759 } 586 }
760} 587}
761 588
589static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
590{
591 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
592
593 features &= (real_dev->features | NETIF_F_LLTX);
594 if (dev_ethtool_get_rx_csum(real_dev))
595 features |= NETIF_F_RXCSUM;
596
597 return features;
598}
599
762static int vlan_ethtool_get_settings(struct net_device *dev, 600static int vlan_ethtool_get_settings(struct net_device *dev,
763 struct ethtool_cmd *cmd) 601 struct ethtool_cmd *cmd)
764{ 602{
@@ -774,18 +612,6 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
774 strcpy(info->fw_version, "N/A"); 612 strcpy(info->fw_version, "N/A");
775} 613}
776 614
777static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
778{
779 const struct vlan_dev_info *vlan = vlan_dev_info(dev);
780 return dev_ethtool_get_rx_csum(vlan->real_dev);
781}
782
783static u32 vlan_ethtool_get_flags(struct net_device *dev)
784{
785 const struct vlan_dev_info *vlan = vlan_dev_info(dev);
786 return dev_ethtool_get_flags(vlan->real_dev);
787}
788
789static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 615static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
790{ 616{
791 617
@@ -823,32 +649,10 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
823 return stats; 649 return stats;
824} 650}
825 651
826static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
827{
828 if (data) {
829 struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
830
831 /* Underlying device must support TSO for VLAN-tagged packets
832 * and must have TSO enabled now.
833 */
834 if (!(real_dev->vlan_features & NETIF_F_TSO))
835 return -EOPNOTSUPP;
836 if (!(real_dev->features & NETIF_F_TSO))
837 return -EINVAL;
838 dev->features |= NETIF_F_TSO;
839 } else {
840 dev->features &= ~NETIF_F_TSO;
841 }
842 return 0;
843}
844
845static const struct ethtool_ops vlan_ethtool_ops = { 652static const struct ethtool_ops vlan_ethtool_ops = {
846 .get_settings = vlan_ethtool_get_settings, 653 .get_settings = vlan_ethtool_get_settings,
847 .get_drvinfo = vlan_ethtool_get_drvinfo, 654 .get_drvinfo = vlan_ethtool_get_drvinfo,
848 .get_link = ethtool_op_get_link, 655 .get_link = ethtool_op_get_link,
849 .get_rx_csum = vlan_ethtool_get_rx_csum,
850 .get_flags = vlan_ethtool_get_flags,
851 .set_tso = vlan_ethtool_set_tso,
852}; 656};
853 657
854static const struct net_device_ops vlan_netdev_ops = { 658static const struct net_device_ops vlan_netdev_ops = {
@@ -874,6 +678,7 @@ static const struct net_device_ops vlan_netdev_ops = {
874 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, 678 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
875 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, 679 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
876#endif 680#endif
681 .ndo_fix_features = vlan_dev_fix_features,
877}; 682};
878 683
879void vlan_setup(struct net_device *dev) 684void vlan_setup(struct net_device *dev)
diff --git a/net/9p/client.c b/net/9p/client.c
index 48b8e084e710..0ce959218607 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1302,7 +1302,7 @@ int
1302p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, 1302p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1303 u32 count) 1303 u32 count)
1304{ 1304{
1305 int err, rsize, total; 1305 int err, rsize;
1306 struct p9_client *clnt; 1306 struct p9_client *clnt;
1307 struct p9_req_t *req; 1307 struct p9_req_t *req;
1308 char *dataptr; 1308 char *dataptr;
@@ -1311,7 +1311,6 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1311 (long long unsigned) offset, count); 1311 (long long unsigned) offset, count);
1312 err = 0; 1312 err = 0;
1313 clnt = fid->clnt; 1313 clnt = fid->clnt;
1314 total = 0;
1315 1314
1316 rsize = fid->iounit; 1315 rsize = fid->iounit;
1317 if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) 1316 if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1367,7 +1366,7 @@ int
1367p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, 1366p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1368 u64 offset, u32 count) 1367 u64 offset, u32 count)
1369{ 1368{
1370 int err, rsize, total; 1369 int err, rsize;
1371 struct p9_client *clnt; 1370 struct p9_client *clnt;
1372 struct p9_req_t *req; 1371 struct p9_req_t *req;
1373 1372
@@ -1375,7 +1374,6 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1375 fid->fid, (long long unsigned) offset, count); 1374 fid->fid, (long long unsigned) offset, count);
1376 err = 0; 1375 err = 0;
1377 clnt = fid->clnt; 1376 clnt = fid->clnt;
1378 total = 0;
1379 1377
1380 rsize = fid->iounit; 1378 rsize = fid->iounit;
1381 if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) 1379 if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1766,7 +1764,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
1766 1764
1767int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) 1765int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
1768{ 1766{
1769 int err, rsize, total; 1767 int err, rsize;
1770 struct p9_client *clnt; 1768 struct p9_client *clnt;
1771 struct p9_req_t *req; 1769 struct p9_req_t *req;
1772 char *dataptr; 1770 char *dataptr;
@@ -1776,7 +1774,6 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
1776 1774
1777 err = 0; 1775 err = 0;
1778 clnt = fid->clnt; 1776 clnt = fid->clnt;
1779 total = 0;
1780 1777
1781 rsize = fid->iounit; 1778 rsize = fid->iounit;
1782 if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ) 1779 if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ)
diff --git a/net/Kconfig b/net/Kconfig
index 79cabf1ee68b..878151c772c9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,20 @@ config XPS
232 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 232 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
233 default y 233 default y
234 234
235config HAVE_BPF_JIT
236 bool
237
238config BPF_JIT
239 bool "enable BPF Just In Time compiler"
240 depends on HAVE_BPF_JIT
241 depends on MODULES
242 ---help---
243 Berkeley Packet Filter filtering capabilities are normally handled
244 by an interpreter. This option allows kernel to generate a native
245 code when filter is loaded in memory. This should speedup
246 packet sniffing (libpcap/tcpdump). Note : Admin should enable
247 this feature changing /proc/sys/net/core/bpf_jit_enable
248
235menu "Network testing" 249menu "Network testing"
236 250
237config NET_PKTGEN 251config NET_PKTGEN
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 38754fdb88ba..25073b6ef474 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -129,7 +129,6 @@ static struct net_device *dev_lec[MAX_LEC_ITF];
129#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 129#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
130static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev) 130static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
131{ 131{
132 struct ethhdr *eth;
133 char *buff; 132 char *buff;
134 struct lec_priv *priv; 133 struct lec_priv *priv;
135 134
@@ -138,7 +137,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
138 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit 137 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
139 * as the Config BPDU has 138 * as the Config BPDU has
140 */ 139 */
141 eth = (struct ethhdr *)skb->data;
142 buff = skb->data + skb->dev->hard_header_len; 140 buff = skb->data + skb->dev->hard_header_len;
143 if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) { 141 if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
144 struct sock *sk; 142 struct sock *sk;
@@ -1180,7 +1178,6 @@ static int __init lane_module_init(void)
1180static void __exit lane_module_cleanup(void) 1178static void __exit lane_module_cleanup(void)
1181{ 1179{
1182 int i; 1180 int i;
1183 struct lec_priv *priv;
1184 1181
1185 remove_proc_entry("lec", atm_proc_root); 1182 remove_proc_entry("lec", atm_proc_root);
1186 1183
@@ -1188,7 +1185,6 @@ static void __exit lane_module_cleanup(void)
1188 1185
1189 for (i = 0; i < MAX_LEC_ITF; i++) { 1186 for (i = 0; i < MAX_LEC_ITF; i++) {
1190 if (dev_lec[i] != NULL) { 1187 if (dev_lec[i] != NULL) {
1191 priv = netdev_priv(dev_lec[i]);
1192 unregister_netdev(dev_lec[i]); 1188 unregister_netdev(dev_lec[i]);
1193 free_netdev(dev_lec[i]); 1189 free_netdev(dev_lec[i]);
1194 dev_lec[i] = NULL; 1190 dev_lec[i] = NULL;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6da5daeebab7..e7c69f4619ec 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1538,8 +1538,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1538 } 1538 }
1539 1539
1540 /* Build a packet */ 1540 /* Build a packet */
1541 SOCK_DEBUG(sk, "AX.25: sendto: Addresses built. Building packet.\n");
1542
1543 /* Assume the worst case */ 1541 /* Assume the worst case */
1544 size = len + ax25->ax25_dev->dev->hard_header_len; 1542 size = len + ax25->ax25_dev->dev->hard_header_len;
1545 1543
@@ -1549,8 +1547,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1549 1547
1550 skb_reserve(skb, size - len); 1548 skb_reserve(skb, size - len);
1551 1549
1552 SOCK_DEBUG(sk, "AX.25: Appending user data\n");
1553
1554 /* User data follows immediately after the AX.25 data */ 1550 /* User data follows immediately after the AX.25 data */
1555 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 1551 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
1556 err = -EFAULT; 1552 err = -EFAULT;
@@ -1564,8 +1560,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1564 if (!ax25->pidincl) 1560 if (!ax25->pidincl)
1565 *skb_push(skb, 1) = sk->sk_protocol; 1561 *skb_push(skb, 1) = sk->sk_protocol;
1566 1562
1567 SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
1568
1569 if (sk->sk_type == SOCK_SEQPACKET) { 1563 if (sk->sk_type == SOCK_SEQPACKET) {
1570 /* Connected mode sockets go via the LAPB machine */ 1564 /* Connected mode sockets go via the LAPB machine */
1571 if (sk->sk_state != TCP_ESTABLISHED) { 1565 if (sk->sk_state != TCP_ESTABLISHED) {
@@ -1583,22 +1577,14 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
1583 1577
1584 skb_push(skb, 1 + ax25_addr_size(dp)); 1578 skb_push(skb, 1 + ax25_addr_size(dp));
1585 1579
1586 SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp); 1580 /* Building AX.25 Header */
1587
1588 if (dp != NULL)
1589 SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi);
1590 1581
1591 /* Build an AX.25 header */ 1582 /* Build an AX.25 header */
1592 lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call, 1583 lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
1593 dp, AX25_COMMAND, AX25_MODULUS); 1584 dp, AX25_COMMAND, AX25_MODULUS);
1594 1585
1595 SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv);
1596
1597 skb_set_transport_header(skb, lv); 1586 skb_set_transport_header(skb, lv);
1598 1587
1599 SOCK_DEBUG(sk, "base=%p pos=%p\n",
1600 skb->data, skb_transport_header(skb));
1601
1602 *skb_transport_header(skb) = AX25_UI; 1588 *skb_transport_header(skb) = AX25_UI;
1603 1589
1604 /* Datagram frames go straight out of the door as UI */ 1590 /* Datagram frames go straight out of the door as UI */
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 5a0dda8df492..60b545e2822a 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ax25_register_pid);
58 58
59void ax25_protocol_release(unsigned int pid) 59void ax25_protocol_release(unsigned int pid)
60{ 60{
61 struct ax25_protocol *s, *protocol; 61 struct ax25_protocol *protocol;
62 62
63 write_lock_bh(&protocol_list_lock); 63 write_lock_bh(&protocol_list_lock);
64 protocol = protocol_list; 64 protocol = protocol_list;
@@ -72,7 +72,6 @@ void ax25_protocol_release(unsigned int pid)
72 72
73 while (protocol != NULL && protocol->next != NULL) { 73 while (protocol != NULL && protocol->next != NULL) {
74 if (protocol->next->pid == pid) { 74 if (protocol->next->pid == pid) {
75 s = protocol->next;
76 protocol->next = protocol->next->next; 75 protocol->next = protocol->next->next;
77 goto out; 76 goto out;
78 } 77 }
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index af45d6b2031f..c11788c4c1a1 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -95,7 +95,6 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
95 return false; 95 return false;
96} 96}
97 97
98#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
99/* create a new aggregated packet and add this packet to it */ 98/* create a new aggregated packet and add this packet to it */
100static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, 99static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
101 unsigned long send_time, bool direct_link, 100 unsigned long send_time, bool direct_link,
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 3cc43558cf9c..65f39530799d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -23,6 +23,7 @@
23#include "gateway_client.h" 23#include "gateway_client.h"
24#include "gateway_common.h" 24#include "gateway_common.h"
25#include "hard-interface.h" 25#include "hard-interface.h"
26#include "originator.h"
26#include <linux/ip.h> 27#include <linux/ip.h>
27#include <linux/ipv6.h> 28#include <linux/ipv6.h>
28#include <linux/udp.h> 29#include <linux/udp.h>
@@ -42,61 +43,76 @@ static void gw_node_free_ref(struct gw_node *gw_node)
42 call_rcu(&gw_node->rcu, gw_node_free_rcu); 43 call_rcu(&gw_node->rcu, gw_node_free_rcu);
43} 44}
44 45
45void *gw_get_selected(struct bat_priv *bat_priv) 46static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
46{ 47{
47 struct gw_node *curr_gateway_tmp; 48 struct gw_node *gw_node;
48 struct orig_node *orig_node = NULL;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw); 51 gw_node = rcu_dereference(bat_priv->curr_gw);
52 if (!curr_gateway_tmp) 52 if (!gw_node)
53 goto out;
54
55 orig_node = curr_gateway_tmp->orig_node;
56 if (!orig_node)
57 goto out; 53 goto out;
58 54
59 if (!atomic_inc_not_zero(&orig_node->refcount)) 55 if (!atomic_inc_not_zero(&gw_node->refcount))
60 orig_node = NULL; 56 gw_node = NULL;
61 57
62out: 58out:
63 rcu_read_unlock(); 59 rcu_read_unlock();
64 return orig_node; 60 return gw_node;
65} 61}
66 62
67void gw_deselect(struct bat_priv *bat_priv) 63struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
68{ 64{
69 struct gw_node *gw_node; 65 struct gw_node *gw_node;
66 struct orig_node *orig_node = NULL;
70 67
71 spin_lock_bh(&bat_priv->gw_list_lock); 68 gw_node = gw_get_selected_gw_node(bat_priv);
72 gw_node = rcu_dereference(bat_priv->curr_gw); 69 if (!gw_node)
73 rcu_assign_pointer(bat_priv->curr_gw, NULL); 70 goto out;
74 spin_unlock_bh(&bat_priv->gw_list_lock); 71
72 rcu_read_lock();
73 orig_node = gw_node->orig_node;
74 if (!orig_node)
75 goto unlock;
76
77 if (!atomic_inc_not_zero(&orig_node->refcount))
78 orig_node = NULL;
75 79
80unlock:
81 rcu_read_unlock();
82out:
76 if (gw_node) 83 if (gw_node)
77 gw_node_free_ref(gw_node); 84 gw_node_free_ref(gw_node);
85 return orig_node;
78} 86}
79 87
80static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) 88static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
81{ 89{
82 struct gw_node *curr_gw_node; 90 struct gw_node *curr_gw_node;
83 91
92 spin_lock_bh(&bat_priv->gw_list_lock);
93
84 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 94 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
85 new_gw_node = NULL; 95 new_gw_node = NULL;
86 96
87 spin_lock_bh(&bat_priv->gw_list_lock); 97 curr_gw_node = bat_priv->curr_gw;
88 curr_gw_node = rcu_dereference(bat_priv->curr_gw);
89 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 98 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
90 spin_unlock_bh(&bat_priv->gw_list_lock);
91 99
92 if (curr_gw_node) 100 if (curr_gw_node)
93 gw_node_free_ref(curr_gw_node); 101 gw_node_free_ref(curr_gw_node);
102
103 spin_unlock_bh(&bat_priv->gw_list_lock);
104}
105
106void gw_deselect(struct bat_priv *bat_priv)
107{
108 gw_select(bat_priv, NULL);
94} 109}
95 110
96void gw_election(struct bat_priv *bat_priv) 111void gw_election(struct bat_priv *bat_priv)
97{ 112{
98 struct hlist_node *node; 113 struct hlist_node *node;
99 struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL; 114 struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
115 struct neigh_node *router;
100 uint8_t max_tq = 0; 116 uint8_t max_tq = 0;
101 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 117 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
102 int down, up; 118 int down, up;
@@ -110,32 +126,25 @@ void gw_election(struct bat_priv *bat_priv)
110 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) 126 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
111 return; 127 return;
112 128
113 rcu_read_lock(); 129 curr_gw = gw_get_selected_gw_node(bat_priv);
114 curr_gw = rcu_dereference(bat_priv->curr_gw); 130 if (curr_gw)
115 if (curr_gw) { 131 goto out;
116 rcu_read_unlock();
117 return;
118 }
119 132
133 rcu_read_lock();
120 if (hlist_empty(&bat_priv->gw_list)) { 134 if (hlist_empty(&bat_priv->gw_list)) {
121 135 bat_dbg(DBG_BATMAN, bat_priv,
122 if (curr_gw) { 136 "Removing selected gateway - "
123 rcu_read_unlock(); 137 "no gateway in range\n");
124 bat_dbg(DBG_BATMAN, bat_priv, 138 gw_deselect(bat_priv);
125 "Removing selected gateway - " 139 goto unlock;
126 "no gateway in range\n");
127 gw_deselect(bat_priv);
128 } else
129 rcu_read_unlock();
130
131 return;
132 } 140 }
133 141
134 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 142 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
135 if (!gw_node->orig_node->router) 143 if (gw_node->deleted)
136 continue; 144 continue;
137 145
138 if (gw_node->deleted) 146 router = orig_node_get_router(gw_node->orig_node);
147 if (!router)
139 continue; 148 continue;
140 149
141 switch (atomic_read(&bat_priv->gw_sel_class)) { 150 switch (atomic_read(&bat_priv->gw_sel_class)) {
@@ -143,15 +152,14 @@ void gw_election(struct bat_priv *bat_priv)
143 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, 152 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
144 &down, &up); 153 &down, &up);
145 154
146 tmp_gw_factor = (gw_node->orig_node->router->tq_avg * 155 tmp_gw_factor = (router->tq_avg * router->tq_avg *
147 gw_node->orig_node->router->tq_avg *
148 down * 100 * 100) / 156 down * 100 * 100) /
149 (TQ_LOCAL_WINDOW_SIZE * 157 (TQ_LOCAL_WINDOW_SIZE *
150 TQ_LOCAL_WINDOW_SIZE * 64); 158 TQ_LOCAL_WINDOW_SIZE * 64);
151 159
152 if ((tmp_gw_factor > max_gw_factor) || 160 if ((tmp_gw_factor > max_gw_factor) ||
153 ((tmp_gw_factor == max_gw_factor) && 161 ((tmp_gw_factor == max_gw_factor) &&
154 (gw_node->orig_node->router->tq_avg > max_tq))) 162 (router->tq_avg > max_tq)))
155 curr_gw_tmp = gw_node; 163 curr_gw_tmp = gw_node;
156 break; 164 break;
157 165
@@ -163,19 +171,25 @@ void gw_election(struct bat_priv *bat_priv)
163 * soon as a better gateway appears which has 171 * soon as a better gateway appears which has
164 * $routing_class more tq points) 172 * $routing_class more tq points)
165 **/ 173 **/
166 if (gw_node->orig_node->router->tq_avg > max_tq) 174 if (router->tq_avg > max_tq)
167 curr_gw_tmp = gw_node; 175 curr_gw_tmp = gw_node;
168 break; 176 break;
169 } 177 }
170 178
171 if (gw_node->orig_node->router->tq_avg > max_tq) 179 if (router->tq_avg > max_tq)
172 max_tq = gw_node->orig_node->router->tq_avg; 180 max_tq = router->tq_avg;
173 181
174 if (tmp_gw_factor > max_gw_factor) 182 if (tmp_gw_factor > max_gw_factor)
175 max_gw_factor = tmp_gw_factor; 183 max_gw_factor = tmp_gw_factor;
184
185 neigh_node_free_ref(router);
176 } 186 }
177 187
178 if (curr_gw != curr_gw_tmp) { 188 if (curr_gw != curr_gw_tmp) {
189 router = orig_node_get_router(curr_gw_tmp->orig_node);
190 if (!router)
191 goto unlock;
192
179 if ((curr_gw) && (!curr_gw_tmp)) 193 if ((curr_gw) && (!curr_gw_tmp))
180 bat_dbg(DBG_BATMAN, bat_priv, 194 bat_dbg(DBG_BATMAN, bat_priv,
181 "Removing selected gateway - " 195 "Removing selected gateway - "
@@ -186,48 +200,50 @@ void gw_election(struct bat_priv *bat_priv)
186 "(gw_flags: %i, tq: %i)\n", 200 "(gw_flags: %i, tq: %i)\n",
187 curr_gw_tmp->orig_node->orig, 201 curr_gw_tmp->orig_node->orig,
188 curr_gw_tmp->orig_node->gw_flags, 202 curr_gw_tmp->orig_node->gw_flags,
189 curr_gw_tmp->orig_node->router->tq_avg); 203 router->tq_avg);
190 else 204 else
191 bat_dbg(DBG_BATMAN, bat_priv, 205 bat_dbg(DBG_BATMAN, bat_priv,
192 "Changing route to gateway %pM " 206 "Changing route to gateway %pM "
193 "(gw_flags: %i, tq: %i)\n", 207 "(gw_flags: %i, tq: %i)\n",
194 curr_gw_tmp->orig_node->orig, 208 curr_gw_tmp->orig_node->orig,
195 curr_gw_tmp->orig_node->gw_flags, 209 curr_gw_tmp->orig_node->gw_flags,
196 curr_gw_tmp->orig_node->router->tq_avg); 210 router->tq_avg);
197 211
212 neigh_node_free_ref(router);
198 gw_select(bat_priv, curr_gw_tmp); 213 gw_select(bat_priv, curr_gw_tmp);
199 } 214 }
200 215
216unlock:
201 rcu_read_unlock(); 217 rcu_read_unlock();
218out:
219 if (curr_gw)
220 gw_node_free_ref(curr_gw);
202} 221}
203 222
204void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 223void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
205{ 224{
206 struct gw_node *curr_gateway_tmp; 225 struct orig_node *curr_gw_orig;
226 struct neigh_node *router_gw = NULL, *router_orig = NULL;
207 uint8_t gw_tq_avg, orig_tq_avg; 227 uint8_t gw_tq_avg, orig_tq_avg;
208 228
209 rcu_read_lock(); 229 curr_gw_orig = gw_get_selected_orig(bat_priv);
210 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw); 230 if (!curr_gw_orig)
211 if (!curr_gateway_tmp) 231 goto deselect;
212 goto out_rcu;
213
214 if (!curr_gateway_tmp->orig_node)
215 goto deselect_rcu;
216 232
217 if (!curr_gateway_tmp->orig_node->router) 233 router_gw = orig_node_get_router(curr_gw_orig);
218 goto deselect_rcu; 234 if (!router_gw)
235 goto deselect;
219 236
220 /* this node already is the gateway */ 237 /* this node already is the gateway */
221 if (curr_gateway_tmp->orig_node == orig_node) 238 if (curr_gw_orig == orig_node)
222 goto out_rcu; 239 goto out;
223
224 if (!orig_node->router)
225 goto out_rcu;
226 240
227 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg; 241 router_orig = orig_node_get_router(orig_node);
228 rcu_read_unlock(); 242 if (!router_orig)
243 goto out;
229 244
230 orig_tq_avg = orig_node->router->tq_avg; 245 gw_tq_avg = router_gw->tq_avg;
246 orig_tq_avg = router_orig->tq_avg;
231 247
232 /* the TQ value has to be better */ 248 /* the TQ value has to be better */
233 if (orig_tq_avg < gw_tq_avg) 249 if (orig_tq_avg < gw_tq_avg)
@@ -245,16 +261,17 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
245 "Restarting gateway selection: better gateway found (tq curr: " 261 "Restarting gateway selection: better gateway found (tq curr: "
246 "%i, tq new: %i)\n", 262 "%i, tq new: %i)\n",
247 gw_tq_avg, orig_tq_avg); 263 gw_tq_avg, orig_tq_avg);
248 goto deselect;
249 264
250out_rcu:
251 rcu_read_unlock();
252 goto out;
253deselect_rcu:
254 rcu_read_unlock();
255deselect: 265deselect:
256 gw_deselect(bat_priv); 266 gw_deselect(bat_priv);
257out: 267out:
268 if (curr_gw_orig)
269 orig_node_free_ref(curr_gw_orig);
270 if (router_gw)
271 neigh_node_free_ref(router_gw);
272 if (router_orig)
273 neigh_node_free_ref(router_orig);
274
258 return; 275 return;
259} 276}
260 277
@@ -291,7 +308,15 @@ void gw_node_update(struct bat_priv *bat_priv,
291 struct orig_node *orig_node, uint8_t new_gwflags) 308 struct orig_node *orig_node, uint8_t new_gwflags)
292{ 309{
293 struct hlist_node *node; 310 struct hlist_node *node;
294 struct gw_node *gw_node; 311 struct gw_node *gw_node, *curr_gw;
312
313 /**
314 * Note: We don't need a NULL check here, since curr_gw never gets
315 * dereferenced. If curr_gw is NULL we also should not exit as we may
316 * have this gateway in our list (duplication check!) even though we
317 * have no currently selected gateway.
318 */
319 curr_gw = gw_get_selected_gw_node(bat_priv);
295 320
296 rcu_read_lock(); 321 rcu_read_lock();
297 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 322 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
@@ -312,22 +337,26 @@ void gw_node_update(struct bat_priv *bat_priv,
312 "Gateway %pM removed from gateway list\n", 337 "Gateway %pM removed from gateway list\n",
313 orig_node->orig); 338 orig_node->orig);
314 339
315 if (gw_node == rcu_dereference(bat_priv->curr_gw)) { 340 if (gw_node == curr_gw)
316 rcu_read_unlock(); 341 goto deselect;
317 gw_deselect(bat_priv);
318 return;
319 }
320 } 342 }
321 343
322 rcu_read_unlock(); 344 goto unlock;
323 return;
324 } 345 }
325 rcu_read_unlock();
326 346
327 if (new_gwflags == 0) 347 if (new_gwflags == 0)
328 return; 348 goto unlock;
329 349
330 gw_node_add(bat_priv, orig_node, new_gwflags); 350 gw_node_add(bat_priv, orig_node, new_gwflags);
351 goto unlock;
352
353deselect:
354 gw_deselect(bat_priv);
355unlock:
356 rcu_read_unlock();
357
358 if (curr_gw)
359 gw_node_free_ref(curr_gw);
331} 360}
332 361
333void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 362void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -337,9 +366,12 @@ void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
337 366
338void gw_node_purge(struct bat_priv *bat_priv) 367void gw_node_purge(struct bat_priv *bat_priv)
339{ 368{
340 struct gw_node *gw_node; 369 struct gw_node *gw_node, *curr_gw;
341 struct hlist_node *node, *node_tmp; 370 struct hlist_node *node, *node_tmp;
342 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 371 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
372 char do_deselect = 0;
373
374 curr_gw = gw_get_selected_gw_node(bat_priv);
343 375
344 spin_lock_bh(&bat_priv->gw_list_lock); 376 spin_lock_bh(&bat_priv->gw_list_lock);
345 377
@@ -350,41 +382,56 @@ void gw_node_purge(struct bat_priv *bat_priv)
350 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) 382 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
351 continue; 383 continue;
352 384
353 if (rcu_dereference(bat_priv->curr_gw) == gw_node) 385 if (curr_gw == gw_node)
354 gw_deselect(bat_priv); 386 do_deselect = 1;
355 387
356 hlist_del_rcu(&gw_node->list); 388 hlist_del_rcu(&gw_node->list);
357 gw_node_free_ref(gw_node); 389 gw_node_free_ref(gw_node);
358 } 390 }
359 391
360
361 spin_unlock_bh(&bat_priv->gw_list_lock); 392 spin_unlock_bh(&bat_priv->gw_list_lock);
393
394 /* gw_deselect() needs to acquire the gw_list_lock */
395 if (do_deselect)
396 gw_deselect(bat_priv);
397
398 if (curr_gw)
399 gw_node_free_ref(curr_gw);
362} 400}
363 401
402/**
403 * fails if orig_node has no router
404 */
364static int _write_buffer_text(struct bat_priv *bat_priv, 405static int _write_buffer_text(struct bat_priv *bat_priv,
365 struct seq_file *seq, struct gw_node *gw_node) 406 struct seq_file *seq, struct gw_node *gw_node)
366{ 407{
367 struct gw_node *curr_gw; 408 struct gw_node *curr_gw;
368 int down, up, ret; 409 struct neigh_node *router;
410 int down, up, ret = -1;
369 411
370 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 412 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
371 413
372 rcu_read_lock(); 414 router = orig_node_get_router(gw_node->orig_node);
373 curr_gw = rcu_dereference(bat_priv->curr_gw); 415 if (!router)
416 goto out;
374 417
375 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 418 curr_gw = gw_get_selected_gw_node(bat_priv);
376 (curr_gw == gw_node ? "=>" : " "),
377 gw_node->orig_node->orig,
378 gw_node->orig_node->router->tq_avg,
379 gw_node->orig_node->router->addr,
380 gw_node->orig_node->router->if_incoming->net_dev->name,
381 gw_node->orig_node->gw_flags,
382 (down > 2048 ? down / 1024 : down),
383 (down > 2048 ? "MBit" : "KBit"),
384 (up > 2048 ? up / 1024 : up),
385 (up > 2048 ? "MBit" : "KBit"));
386 419
387 rcu_read_unlock(); 420 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
421 (curr_gw == gw_node ? "=>" : " "),
422 gw_node->orig_node->orig,
423 router->tq_avg, router->addr,
424 router->if_incoming->net_dev->name,
425 gw_node->orig_node->gw_flags,
426 (down > 2048 ? down / 1024 : down),
427 (down > 2048 ? "MBit" : "KBit"),
428 (up > 2048 ? up / 1024 : up),
429 (up > 2048 ? "MBit" : "KBit"));
430
431 neigh_node_free_ref(router);
432 if (curr_gw)
433 gw_node_free_ref(curr_gw);
434out:
388 return ret; 435 return ret;
389} 436}
390 437
@@ -392,40 +439,42 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
392{ 439{
393 struct net_device *net_dev = (struct net_device *)seq->private; 440 struct net_device *net_dev = (struct net_device *)seq->private;
394 struct bat_priv *bat_priv = netdev_priv(net_dev); 441 struct bat_priv *bat_priv = netdev_priv(net_dev);
442 struct hard_iface *primary_if;
395 struct gw_node *gw_node; 443 struct gw_node *gw_node;
396 struct hlist_node *node; 444 struct hlist_node *node;
397 int gw_count = 0; 445 int gw_count = 0, ret = 0;
398
399 if (!bat_priv->primary_if) {
400 446
401 return seq_printf(seq, "BATMAN mesh %s disabled - please " 447 primary_if = primary_if_get_selected(bat_priv);
402 "specify interfaces to enable it\n", 448 if (!primary_if) {
403 net_dev->name); 449 ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
450 "specify interfaces to enable it\n",
451 net_dev->name);
452 goto out;
404 } 453 }
405 454
406 if (bat_priv->primary_if->if_status != IF_ACTIVE) { 455 if (primary_if->if_status != IF_ACTIVE) {
407 456 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
408 return seq_printf(seq, "BATMAN mesh %s disabled - " 457 "primary interface not active\n",
409 "primary interface not active\n", 458 net_dev->name);
410 net_dev->name); 459 goto out;
411 } 460 }
412 461
413 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 462 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
414 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 463 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
415 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 464 "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
416 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR, 465 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
417 bat_priv->primary_if->net_dev->name, 466 primary_if->net_dev->name,
418 bat_priv->primary_if->net_dev->dev_addr, net_dev->name); 467 primary_if->net_dev->dev_addr, net_dev->name);
419 468
420 rcu_read_lock(); 469 rcu_read_lock();
421 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 470 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
422 if (gw_node->deleted) 471 if (gw_node->deleted)
423 continue; 472 continue;
424 473
425 if (!gw_node->orig_node->router) 474 /* fails if orig_node has no router */
475 if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
426 continue; 476 continue;
427 477
428 _write_buffer_text(bat_priv, seq, gw_node);
429 gw_count++; 478 gw_count++;
430 } 479 }
431 rcu_read_unlock(); 480 rcu_read_unlock();
@@ -433,7 +482,10 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
433 if (gw_count == 0) 482 if (gw_count == 0)
434 seq_printf(seq, "No gateways in range ...\n"); 483 seq_printf(seq, "No gateways in range ...\n");
435 484
436 return 0; 485out:
486 if (primary_if)
487 hardif_free_ref(primary_if);
488 return ret;
437} 489}
438 490
439int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) 491int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
@@ -442,6 +494,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
442 struct iphdr *iphdr; 494 struct iphdr *iphdr;
443 struct ipv6hdr *ipv6hdr; 495 struct ipv6hdr *ipv6hdr;
444 struct udphdr *udphdr; 496 struct udphdr *udphdr;
497 struct gw_node *curr_gw;
445 unsigned int header_len = 0; 498 unsigned int header_len = 0;
446 499
447 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) 500 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
@@ -506,12 +559,11 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
506 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER) 559 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
507 return -1; 560 return -1;
508 561
509 rcu_read_lock(); 562 curr_gw = gw_get_selected_gw_node(bat_priv);
510 if (!rcu_dereference(bat_priv->curr_gw)) { 563 if (!curr_gw)
511 rcu_read_unlock();
512 return 0; 564 return 0;
513 }
514 rcu_read_unlock();
515 565
566 if (curr_gw)
567 gw_node_free_ref(curr_gw);
516 return 1; 568 return 1;
517} 569}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 2aa439124ee3..1ce8c6066da1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -24,7 +24,7 @@
24 24
25void gw_deselect(struct bat_priv *bat_priv); 25void gw_deselect(struct bat_priv *bat_priv);
26void gw_election(struct bat_priv *bat_priv); 26void gw_election(struct bat_priv *bat_priv);
27void *gw_get_selected(struct bat_priv *bat_priv); 27struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
28void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node); 28void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
29void gw_node_update(struct bat_priv *bat_priv, 29void gw_node_update(struct bat_priv *bat_priv,
30 struct orig_node *orig_node, uint8_t new_gwflags); 30 struct orig_node *orig_node, uint8_t new_gwflags);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b3058e46ee6b..3e888f133d75 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -110,47 +110,60 @@ out:
110 return hard_iface; 110 return hard_iface;
111} 111}
112 112
113static void update_primary_addr(struct bat_priv *bat_priv) 113static void primary_if_update_addr(struct bat_priv *bat_priv)
114{ 114{
115 struct vis_packet *vis_packet; 115 struct vis_packet *vis_packet;
116 struct hard_iface *primary_if;
117
118 primary_if = primary_if_get_selected(bat_priv);
119 if (!primary_if)
120 goto out;
116 121
117 vis_packet = (struct vis_packet *) 122 vis_packet = (struct vis_packet *)
118 bat_priv->my_vis_info->skb_packet->data; 123 bat_priv->my_vis_info->skb_packet->data;
119 memcpy(vis_packet->vis_orig, 124 memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
120 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
121 memcpy(vis_packet->sender_orig, 125 memcpy(vis_packet->sender_orig,
122 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 126 primary_if->net_dev->dev_addr, ETH_ALEN);
127
128out:
129 if (primary_if)
130 hardif_free_ref(primary_if);
123} 131}
124 132
125static void set_primary_if(struct bat_priv *bat_priv, 133static void primary_if_select(struct bat_priv *bat_priv,
126 struct hard_iface *hard_iface) 134 struct hard_iface *new_hard_iface)
127{ 135{
136 struct hard_iface *curr_hard_iface;
128 struct batman_packet *batman_packet; 137 struct batman_packet *batman_packet;
129 struct hard_iface *old_if;
130 138
131 if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount)) 139 spin_lock_bh(&hardif_list_lock);
132 hard_iface = NULL;
133 140
134 old_if = bat_priv->primary_if; 141 if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
135 bat_priv->primary_if = hard_iface; 142 new_hard_iface = NULL;
136 143
137 if (old_if) 144 curr_hard_iface = bat_priv->primary_if;
138 hardif_free_ref(old_if); 145 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
139 146
140 if (!bat_priv->primary_if) 147 if (curr_hard_iface)
141 return; 148 hardif_free_ref(curr_hard_iface);
142 149
143 batman_packet = (struct batman_packet *)(hard_iface->packet_buff); 150 if (!new_hard_iface)
151 goto out;
152
153 batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
144 batman_packet->flags = PRIMARIES_FIRST_HOP; 154 batman_packet->flags = PRIMARIES_FIRST_HOP;
145 batman_packet->ttl = TTL; 155 batman_packet->ttl = TTL;
146 156
147 update_primary_addr(bat_priv); 157 primary_if_update_addr(bat_priv);
148 158
149 /*** 159 /***
150 * hacky trick to make sure that we send the HNA information via 160 * hacky trick to make sure that we send the HNA information via
151 * our new primary interface 161 * our new primary interface
152 */ 162 */
153 atomic_set(&bat_priv->hna_local_changed, 1); 163 atomic_set(&bat_priv->hna_local_changed, 1);
164
165out:
166 spin_unlock_bh(&hardif_list_lock);
154} 167}
155 168
156static bool hardif_is_iface_up(struct hard_iface *hard_iface) 169static bool hardif_is_iface_up(struct hard_iface *hard_iface)
@@ -236,9 +249,10 @@ void update_min_mtu(struct net_device *soft_iface)
236static void hardif_activate_interface(struct hard_iface *hard_iface) 249static void hardif_activate_interface(struct hard_iface *hard_iface)
237{ 250{
238 struct bat_priv *bat_priv; 251 struct bat_priv *bat_priv;
252 struct hard_iface *primary_if = NULL;
239 253
240 if (hard_iface->if_status != IF_INACTIVE) 254 if (hard_iface->if_status != IF_INACTIVE)
241 return; 255 goto out;
242 256
243 bat_priv = netdev_priv(hard_iface->soft_iface); 257 bat_priv = netdev_priv(hard_iface->soft_iface);
244 258
@@ -249,14 +263,18 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
249 * the first active interface becomes our primary interface or 263 * the first active interface becomes our primary interface or
250 * the next active interface after the old primay interface was removed 264 * the next active interface after the old primay interface was removed
251 */ 265 */
252 if (!bat_priv->primary_if) 266 primary_if = primary_if_get_selected(bat_priv);
253 set_primary_if(bat_priv, hard_iface); 267 if (!primary_if)
268 primary_if_select(bat_priv, hard_iface);
254 269
255 bat_info(hard_iface->soft_iface, "Interface activated: %s\n", 270 bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
256 hard_iface->net_dev->name); 271 hard_iface->net_dev->name);
257 272
258 update_min_mtu(hard_iface->soft_iface); 273 update_min_mtu(hard_iface->soft_iface);
259 return; 274
275out:
276 if (primary_if)
277 hardif_free_ref(primary_if);
260} 278}
261 279
262static void hardif_deactivate_interface(struct hard_iface *hard_iface) 280static void hardif_deactivate_interface(struct hard_iface *hard_iface)
@@ -386,12 +404,13 @@ err:
386void hardif_disable_interface(struct hard_iface *hard_iface) 404void hardif_disable_interface(struct hard_iface *hard_iface)
387{ 405{
388 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 406 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
407 struct hard_iface *primary_if = NULL;
389 408
390 if (hard_iface->if_status == IF_ACTIVE) 409 if (hard_iface->if_status == IF_ACTIVE)
391 hardif_deactivate_interface(hard_iface); 410 hardif_deactivate_interface(hard_iface);
392 411
393 if (hard_iface->if_status != IF_INACTIVE) 412 if (hard_iface->if_status != IF_INACTIVE)
394 return; 413 goto out;
395 414
396 bat_info(hard_iface->soft_iface, "Removing interface: %s\n", 415 bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
397 hard_iface->net_dev->name); 416 hard_iface->net_dev->name);
@@ -400,11 +419,12 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
400 bat_priv->num_ifaces--; 419 bat_priv->num_ifaces--;
401 orig_hash_del_if(hard_iface, bat_priv->num_ifaces); 420 orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
402 421
403 if (hard_iface == bat_priv->primary_if) { 422 primary_if = primary_if_get_selected(bat_priv);
423 if (hard_iface == primary_if) {
404 struct hard_iface *new_if; 424 struct hard_iface *new_if;
405 425
406 new_if = hardif_get_active(hard_iface->soft_iface); 426 new_if = hardif_get_active(hard_iface->soft_iface);
407 set_primary_if(bat_priv, new_if); 427 primary_if_select(bat_priv, new_if);
408 428
409 if (new_if) 429 if (new_if)
410 hardif_free_ref(new_if); 430 hardif_free_ref(new_if);
@@ -425,6 +445,10 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
425 445
426 hard_iface->soft_iface = NULL; 446 hard_iface->soft_iface = NULL;
427 hardif_free_ref(hard_iface); 447 hardif_free_ref(hard_iface);
448
449out:
450 if (primary_if)
451 hardif_free_ref(primary_if);
428} 452}
429 453
430static struct hard_iface *hardif_add_interface(struct net_device *net_dev) 454static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
@@ -514,6 +538,7 @@ static int hard_if_event(struct notifier_block *this,
514{ 538{
515 struct net_device *net_dev = (struct net_device *)ptr; 539 struct net_device *net_dev = (struct net_device *)ptr;
516 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); 540 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
541 struct hard_iface *primary_if = NULL;
517 struct bat_priv *bat_priv; 542 struct bat_priv *bat_priv;
518 543
519 if (!hard_iface && event == NETDEV_REGISTER) 544 if (!hard_iface && event == NETDEV_REGISTER)
@@ -549,8 +574,12 @@ static int hard_if_event(struct notifier_block *this,
549 update_mac_addresses(hard_iface); 574 update_mac_addresses(hard_iface);
550 575
551 bat_priv = netdev_priv(hard_iface->soft_iface); 576 bat_priv = netdev_priv(hard_iface->soft_iface);
552 if (hard_iface == bat_priv->primary_if) 577 primary_if = primary_if_get_selected(bat_priv);
553 update_primary_addr(bat_priv); 578 if (!primary_if)
579 goto hardif_put;
580
581 if (hard_iface == primary_if)
582 primary_if_update_addr(bat_priv);
554 break; 583 break;
555 default: 584 default:
556 break; 585 break;
@@ -559,6 +588,8 @@ static int hard_if_event(struct notifier_block *this,
559hardif_put: 588hardif_put:
560 hardif_free_ref(hard_iface); 589 hardif_free_ref(hard_iface);
561out: 590out:
591 if (primary_if)
592 hardif_free_ref(primary_if);
562 return NOTIFY_DONE; 593 return NOTIFY_DONE;
563} 594}
564 595
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index a9ddf36e51c8..64265991460b 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -45,4 +45,22 @@ static inline void hardif_free_ref(struct hard_iface *hard_iface)
45 call_rcu(&hard_iface->rcu, hardif_free_rcu); 45 call_rcu(&hard_iface->rcu, hardif_free_rcu);
46} 46}
47 47
48static inline struct hard_iface *primary_if_get_selected(
49 struct bat_priv *bat_priv)
50{
51 struct hard_iface *hard_iface;
52
53 rcu_read_lock();
54 hard_iface = rcu_dereference(bat_priv->primary_if);
55 if (!hard_iface)
56 goto out;
57
58 if (!atomic_inc_not_zero(&hard_iface->refcount))
59 hard_iface = NULL;
60
61out:
62 rcu_read_unlock();
63 return hard_iface;
64}
65
48#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */ 66#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 34ce56c358e5..fa22ba2bb832 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -153,6 +153,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
153{ 153{
154 struct socket_client *socket_client = file->private_data; 154 struct socket_client *socket_client = file->private_data;
155 struct bat_priv *bat_priv = socket_client->bat_priv; 155 struct bat_priv *bat_priv = socket_client->bat_priv;
156 struct hard_iface *primary_if = NULL;
156 struct sk_buff *skb; 157 struct sk_buff *skb;
157 struct icmp_packet_rr *icmp_packet; 158 struct icmp_packet_rr *icmp_packet;
158 159
@@ -167,15 +168,21 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
167 return -EINVAL; 168 return -EINVAL;
168 } 169 }
169 170
170 if (!bat_priv->primary_if) 171 primary_if = primary_if_get_selected(bat_priv);
171 return -EFAULT; 172
173 if (!primary_if) {
174 len = -EFAULT;
175 goto out;
176 }
172 177
173 if (len >= sizeof(struct icmp_packet_rr)) 178 if (len >= sizeof(struct icmp_packet_rr))
174 packet_len = sizeof(struct icmp_packet_rr); 179 packet_len = sizeof(struct icmp_packet_rr);
175 180
176 skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); 181 skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
177 if (!skb) 182 if (!skb) {
178 return -ENOMEM; 183 len = -ENOMEM;
184 goto out;
185 }
179 186
180 skb_reserve(skb, sizeof(struct ethhdr)); 187 skb_reserve(skb, sizeof(struct ethhdr));
181 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); 188 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
@@ -218,23 +225,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
218 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 225 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
219 goto dst_unreach; 226 goto dst_unreach;
220 227
221 rcu_read_lock();
222 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 228 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
223
224 if (!orig_node) 229 if (!orig_node)
225 goto unlock; 230 goto dst_unreach;
226
227 neigh_node = orig_node->router;
228 231
232 neigh_node = orig_node_get_router(orig_node);
229 if (!neigh_node) 233 if (!neigh_node)
230 goto unlock; 234 goto dst_unreach;
231
232 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
233 neigh_node = NULL;
234 goto unlock;
235 }
236
237 rcu_read_unlock();
238 235
239 if (!neigh_node->if_incoming) 236 if (!neigh_node->if_incoming)
240 goto dst_unreach; 237 goto dst_unreach;
@@ -243,7 +240,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
243 goto dst_unreach; 240 goto dst_unreach;
244 241
245 memcpy(icmp_packet->orig, 242 memcpy(icmp_packet->orig,
246 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 243 primary_if->net_dev->dev_addr, ETH_ALEN);
247 244
248 if (packet_len == sizeof(struct icmp_packet_rr)) 245 if (packet_len == sizeof(struct icmp_packet_rr))
249 memcpy(icmp_packet->rr, 246 memcpy(icmp_packet->rr,
@@ -252,14 +249,14 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
252 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 249 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
253 goto out; 250 goto out;
254 251
255unlock:
256 rcu_read_unlock();
257dst_unreach: 252dst_unreach:
258 icmp_packet->msg_type = DESTINATION_UNREACHABLE; 253 icmp_packet->msg_type = DESTINATION_UNREACHABLE;
259 bat_socket_add_packet(socket_client, icmp_packet, packet_len); 254 bat_socket_add_packet(socket_client, icmp_packet, packet_len);
260free_skb: 255free_skb:
261 kfree_skb(skb); 256 kfree_skb(skb);
262out: 257out:
258 if (primary_if)
259 hardif_free_ref(primary_if);
263 if (neigh_node) 260 if (neigh_node)
264 neigh_node_free_ref(neigh_node); 261 neigh_node_free_ref(neigh_node);
265 if (orig_node) 262 if (orig_node)
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index dc248697de71..ace72852ed7b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -175,4 +175,6 @@ static inline int compare_eth(void *data1, void *data2)
175 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 175 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
176} 176}
177 177
178#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
179
178#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 180#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0b9133022d2d..ef4a9be7613a 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -70,6 +70,21 @@ void neigh_node_free_ref(struct neigh_node *neigh_node)
70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu); 70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
71} 71}
72 72
73/* increases the refcounter of a found router */
74struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
75{
76 struct neigh_node *router;
77
78 rcu_read_lock();
79 router = rcu_dereference(orig_node->router);
80
81 if (router && !atomic_inc_not_zero(&router->refcount))
82 router = NULL;
83
84 rcu_read_unlock();
85 return router;
86}
87
73struct neigh_node *create_neighbor(struct orig_node *orig_node, 88struct neigh_node *create_neighbor(struct orig_node *orig_node,
74 struct orig_node *orig_neigh_node, 89 struct orig_node *orig_neigh_node,
75 uint8_t *neigh, 90 uint8_t *neigh,
@@ -87,6 +102,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
87 102
88 INIT_HLIST_NODE(&neigh_node->list); 103 INIT_HLIST_NODE(&neigh_node->list);
89 INIT_LIST_HEAD(&neigh_node->bonding_list); 104 INIT_LIST_HEAD(&neigh_node->bonding_list);
105 spin_lock_init(&neigh_node->tq_lock);
90 106
91 memcpy(neigh_node->addr, neigh, ETH_ALEN); 107 memcpy(neigh_node->addr, neigh, ETH_ALEN);
92 neigh_node->orig_node = orig_neigh_node; 108 neigh_node->orig_node = orig_neigh_node;
@@ -389,29 +405,34 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
389 struct hashtable_t *hash = bat_priv->orig_hash; 405 struct hashtable_t *hash = bat_priv->orig_hash;
390 struct hlist_node *node, *node_tmp; 406 struct hlist_node *node, *node_tmp;
391 struct hlist_head *head; 407 struct hlist_head *head;
408 struct hard_iface *primary_if;
392 struct orig_node *orig_node; 409 struct orig_node *orig_node;
393 struct neigh_node *neigh_node; 410 struct neigh_node *neigh_node, *neigh_node_tmp;
394 int batman_count = 0; 411 int batman_count = 0;
395 int last_seen_secs; 412 int last_seen_secs;
396 int last_seen_msecs; 413 int last_seen_msecs;
397 int i; 414 int i, ret = 0;
398 415
399 if ((!bat_priv->primary_if) || 416 primary_if = primary_if_get_selected(bat_priv);
400 (bat_priv->primary_if->if_status != IF_ACTIVE)) { 417
401 if (!bat_priv->primary_if) 418 if (!primary_if) {
402 return seq_printf(seq, "BATMAN mesh %s disabled - " 419 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
403 "please specify interfaces to enable it\n", 420 "please specify interfaces to enable it\n",
404 net_dev->name); 421 net_dev->name);
422 goto out;
423 }
405 424
406 return seq_printf(seq, "BATMAN mesh %s " 425 if (primary_if->if_status != IF_ACTIVE) {
407 "disabled - primary interface not active\n", 426 ret = seq_printf(seq, "BATMAN mesh %s "
408 net_dev->name); 427 "disabled - primary interface not active\n",
428 net_dev->name);
429 goto out;
409 } 430 }
410 431
411 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 432 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
412 SOURCE_VERSION, REVISION_VERSION_STR, 433 SOURCE_VERSION, REVISION_VERSION_STR,
413 bat_priv->primary_if->net_dev->name, 434 primary_if->net_dev->name,
414 bat_priv->primary_if->net_dev->dev_addr, net_dev->name); 435 primary_if->net_dev->dev_addr, net_dev->name);
415 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 436 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
416 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 437 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
417 "outgoingIF", "Potential nexthops"); 438 "outgoingIF", "Potential nexthops");
@@ -421,40 +442,47 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
421 442
422 rcu_read_lock(); 443 rcu_read_lock();
423 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 444 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
424 if (!orig_node->router) 445 neigh_node = orig_node_get_router(orig_node);
446 if (!neigh_node)
425 continue; 447 continue;
426 448
427 if (orig_node->router->tq_avg == 0) 449 if (neigh_node->tq_avg == 0)
428 continue; 450 goto next;
429 451
430 last_seen_secs = jiffies_to_msecs(jiffies - 452 last_seen_secs = jiffies_to_msecs(jiffies -
431 orig_node->last_valid) / 1000; 453 orig_node->last_valid) / 1000;
432 last_seen_msecs = jiffies_to_msecs(jiffies - 454 last_seen_msecs = jiffies_to_msecs(jiffies -
433 orig_node->last_valid) % 1000; 455 orig_node->last_valid) % 1000;
434 456
435 neigh_node = orig_node->router;
436 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 457 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
437 orig_node->orig, last_seen_secs, 458 orig_node->orig, last_seen_secs,
438 last_seen_msecs, neigh_node->tq_avg, 459 last_seen_msecs, neigh_node->tq_avg,
439 neigh_node->addr, 460 neigh_node->addr,
440 neigh_node->if_incoming->net_dev->name); 461 neigh_node->if_incoming->net_dev->name);
441 462
442 hlist_for_each_entry_rcu(neigh_node, node_tmp, 463 hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
443 &orig_node->neigh_list, list) { 464 &orig_node->neigh_list, list) {
444 seq_printf(seq, " %pM (%3i)", neigh_node->addr, 465 seq_printf(seq, " %pM (%3i)",
445 neigh_node->tq_avg); 466 neigh_node_tmp->addr,
467 neigh_node_tmp->tq_avg);
446 } 468 }
447 469
448 seq_printf(seq, "\n"); 470 seq_printf(seq, "\n");
449 batman_count++; 471 batman_count++;
472
473next:
474 neigh_node_free_ref(neigh_node);
450 } 475 }
451 rcu_read_unlock(); 476 rcu_read_unlock();
452 } 477 }
453 478
454 if ((batman_count == 0)) 479 if (batman_count == 0)
455 seq_printf(seq, "No batman nodes in range ...\n"); 480 seq_printf(seq, "No batman nodes in range ...\n");
456 481
457 return 0; 482out:
483 if (primary_if)
484 hardif_free_ref(primary_if);
485 return ret;
458} 486}
459 487
460static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) 488static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 5cc011057da1..e1d641f27aa9 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -34,6 +34,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
34 uint8_t *neigh, 34 uint8_t *neigh,
35 struct hard_iface *if_incoming); 35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node); 36void neigh_node_free_ref(struct neigh_node *neigh_node);
37struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
37int orig_seq_print_text(struct seq_file *seq, void *offset); 38int orig_seq_print_text(struct seq_file *seq, void *offset);
38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); 39int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 40int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c172f5d0e05a..49f571553050 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -87,18 +87,20 @@ static void update_route(struct bat_priv *bat_priv,
87 struct neigh_node *neigh_node, 87 struct neigh_node *neigh_node,
88 unsigned char *hna_buff, int hna_buff_len) 88 unsigned char *hna_buff, int hna_buff_len)
89{ 89{
90 struct neigh_node *neigh_node_tmp; 90 struct neigh_node *curr_router;
91
92 curr_router = orig_node_get_router(orig_node);
91 93
92 /* route deleted */ 94 /* route deleted */
93 if ((orig_node->router) && (!neigh_node)) { 95 if ((curr_router) && (!neigh_node)) {
94 96
95 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
96 orig_node->orig); 98 orig_node->orig);
97 hna_global_del_orig(bat_priv, orig_node, 99 hna_global_del_orig(bat_priv, orig_node,
98 "originator timed out"); 100 "originator timed out");
99 101
100 /* route added */ 102 /* route added */
101 } else if ((!orig_node->router) && (neigh_node)) { 103 } else if ((!curr_router) && (neigh_node)) {
102 104
103 bat_dbg(DBG_ROUTES, bat_priv, 105 bat_dbg(DBG_ROUTES, bat_priv,
104 "Adding route towards: %pM (via %pM)\n", 106 "Adding route towards: %pM (via %pM)\n",
@@ -106,21 +108,29 @@ static void update_route(struct bat_priv *bat_priv,
106 hna_global_add_orig(bat_priv, orig_node, 108 hna_global_add_orig(bat_priv, orig_node,
107 hna_buff, hna_buff_len); 109 hna_buff, hna_buff_len);
108 110
109 /* route changed */ 111 /* route changed */
110 } else { 112 } else {
111 bat_dbg(DBG_ROUTES, bat_priv, 113 bat_dbg(DBG_ROUTES, bat_priv,
112 "Changing route towards: %pM " 114 "Changing route towards: %pM "
113 "(now via %pM - was via %pM)\n", 115 "(now via %pM - was via %pM)\n",
114 orig_node->orig, neigh_node->addr, 116 orig_node->orig, neigh_node->addr,
115 orig_node->router->addr); 117 curr_router->addr);
116 } 118 }
117 119
120 if (curr_router)
121 neigh_node_free_ref(curr_router);
122
123 /* increase refcount of new best neighbor */
118 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 124 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
119 neigh_node = NULL; 125 neigh_node = NULL;
120 neigh_node_tmp = orig_node->router; 126
121 orig_node->router = neigh_node; 127 spin_lock_bh(&orig_node->neigh_list_lock);
122 if (neigh_node_tmp) 128 rcu_assign_pointer(orig_node->router, neigh_node);
123 neigh_node_free_ref(neigh_node_tmp); 129 spin_unlock_bh(&orig_node->neigh_list_lock);
130
131 /* decrease refcount of previous best neighbor */
132 if (curr_router)
133 neigh_node_free_ref(curr_router);
124} 134}
125 135
126 136
@@ -128,16 +138,23 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
128 struct neigh_node *neigh_node, unsigned char *hna_buff, 138 struct neigh_node *neigh_node, unsigned char *hna_buff,
129 int hna_buff_len) 139 int hna_buff_len)
130{ 140{
141 struct neigh_node *router = NULL;
131 142
132 if (!orig_node) 143 if (!orig_node)
133 return; 144 goto out;
145
146 router = orig_node_get_router(orig_node);
134 147
135 if (orig_node->router != neigh_node) 148 if (router != neigh_node)
136 update_route(bat_priv, orig_node, neigh_node, 149 update_route(bat_priv, orig_node, neigh_node,
137 hna_buff, hna_buff_len); 150 hna_buff, hna_buff_len);
138 /* may be just HNA changed */ 151 /* may be just HNA changed */
139 else 152 else
140 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); 153 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
154
155out:
156 if (router)
157 neigh_node_free_ref(router);
141} 158}
142 159
143static int is_bidirectional_neigh(struct orig_node *orig_node, 160static int is_bidirectional_neigh(struct orig_node *orig_node,
@@ -288,8 +305,8 @@ static void bonding_candidate_add(struct orig_node *orig_node,
288 struct neigh_node *neigh_node) 305 struct neigh_node *neigh_node)
289{ 306{
290 struct hlist_node *node; 307 struct hlist_node *node;
291 struct neigh_node *tmp_neigh_node; 308 struct neigh_node *tmp_neigh_node, *router = NULL;
292 uint8_t best_tq, interference_candidate = 0; 309 uint8_t interference_candidate = 0;
293 310
294 spin_lock_bh(&orig_node->neigh_list_lock); 311 spin_lock_bh(&orig_node->neigh_list_lock);
295 312
@@ -298,13 +315,12 @@ static void bonding_candidate_add(struct orig_node *orig_node,
298 neigh_node->orig_node->primary_addr)) 315 neigh_node->orig_node->primary_addr))
299 goto candidate_del; 316 goto candidate_del;
300 317
301 if (!orig_node->router) 318 router = orig_node_get_router(orig_node);
319 if (!router)
302 goto candidate_del; 320 goto candidate_del;
303 321
304 best_tq = orig_node->router->tq_avg;
305
306 /* ... and is good enough to be considered */ 322 /* ... and is good enough to be considered */
307 if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) 323 if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
308 goto candidate_del; 324 goto candidate_del;
309 325
310 /** 326 /**
@@ -350,7 +366,9 @@ candidate_del:
350 366
351out: 367out:
352 spin_unlock_bh(&orig_node->neigh_list_lock); 368 spin_unlock_bh(&orig_node->neigh_list_lock);
353 return; 369
370 if (router)
371 neigh_node_free_ref(router);
354} 372}
355 373
356/* copy primary address for bonding */ 374/* copy primary address for bonding */
@@ -373,6 +391,7 @@ static void update_orig(struct bat_priv *bat_priv,
373 char is_duplicate) 391 char is_duplicate)
374{ 392{
375 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 393 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
394 struct neigh_node *router = NULL;
376 struct orig_node *orig_node_tmp; 395 struct orig_node *orig_node_tmp;
377 struct hlist_node *node; 396 struct hlist_node *node;
378 int tmp_hna_buff_len; 397 int tmp_hna_buff_len;
@@ -396,10 +415,12 @@ static void update_orig(struct bat_priv *bat_priv,
396 if (is_duplicate) 415 if (is_duplicate)
397 continue; 416 continue;
398 417
418 spin_lock_bh(&tmp_neigh_node->tq_lock);
399 ring_buffer_set(tmp_neigh_node->tq_recv, 419 ring_buffer_set(tmp_neigh_node->tq_recv,
400 &tmp_neigh_node->tq_index, 0); 420 &tmp_neigh_node->tq_index, 0);
401 tmp_neigh_node->tq_avg = 421 tmp_neigh_node->tq_avg =
402 ring_buffer_avg(tmp_neigh_node->tq_recv); 422 ring_buffer_avg(tmp_neigh_node->tq_recv);
423 spin_unlock_bh(&tmp_neigh_node->tq_lock);
403 } 424 }
404 425
405 if (!neigh_node) { 426 if (!neigh_node) {
@@ -424,10 +445,12 @@ static void update_orig(struct bat_priv *bat_priv,
424 orig_node->flags = batman_packet->flags; 445 orig_node->flags = batman_packet->flags;
425 neigh_node->last_valid = jiffies; 446 neigh_node->last_valid = jiffies;
426 447
448 spin_lock_bh(&neigh_node->tq_lock);
427 ring_buffer_set(neigh_node->tq_recv, 449 ring_buffer_set(neigh_node->tq_recv,
428 &neigh_node->tq_index, 450 &neigh_node->tq_index,
429 batman_packet->tq); 451 batman_packet->tq);
430 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 452 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
453 spin_unlock_bh(&neigh_node->tq_lock);
431 454
432 if (!is_duplicate) { 455 if (!is_duplicate) {
433 orig_node->last_ttl = batman_packet->ttl; 456 orig_node->last_ttl = batman_packet->ttl;
@@ -441,19 +464,18 @@ static void update_orig(struct bat_priv *bat_priv,
441 464
442 /* if this neighbor already is our next hop there is nothing 465 /* if this neighbor already is our next hop there is nothing
443 * to change */ 466 * to change */
444 if (orig_node->router == neigh_node) 467 router = orig_node_get_router(orig_node);
468 if (router == neigh_node)
445 goto update_hna; 469 goto update_hna;
446 470
447 /* if this neighbor does not offer a better TQ we won't consider it */ 471 /* if this neighbor does not offer a better TQ we won't consider it */
448 if ((orig_node->router) && 472 if (router && (router->tq_avg > neigh_node->tq_avg))
449 (orig_node->router->tq_avg > neigh_node->tq_avg))
450 goto update_hna; 473 goto update_hna;
451 474
452 /* if the TQ is the same and the link not more symetric we 475 /* if the TQ is the same and the link not more symetric we
453 * won't consider it either */ 476 * won't consider it either */
454 if ((orig_node->router) && 477 if (router && (neigh_node->tq_avg == router->tq_avg)) {
455 (neigh_node->tq_avg == orig_node->router->tq_avg)) { 478 orig_node_tmp = router->orig_node;
456 orig_node_tmp = orig_node->router->orig_node;
457 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 479 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
458 bcast_own_sum_orig = 480 bcast_own_sum_orig =
459 orig_node_tmp->bcast_own_sum[if_incoming->if_num]; 481 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
@@ -474,7 +496,7 @@ static void update_orig(struct bat_priv *bat_priv,
474 goto update_gw; 496 goto update_gw;
475 497
476update_hna: 498update_hna:
477 update_routes(bat_priv, orig_node, orig_node->router, 499 update_routes(bat_priv, orig_node, router,
478 hna_buff, tmp_hna_buff_len); 500 hna_buff, tmp_hna_buff_len);
479 501
480update_gw: 502update_gw:
@@ -496,6 +518,8 @@ unlock:
496out: 518out:
497 if (neigh_node) 519 if (neigh_node)
498 neigh_node_free_ref(neigh_node); 520 neigh_node_free_ref(neigh_node);
521 if (router)
522 neigh_node_free_ref(router);
499} 523}
500 524
501/* checks whether the host restarted and is in the protection time. 525/* checks whether the host restarted and is in the protection time.
@@ -603,6 +627,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 627 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
604 struct hard_iface *hard_iface; 628 struct hard_iface *hard_iface;
605 struct orig_node *orig_neigh_node, *orig_node; 629 struct orig_node *orig_neigh_node, *orig_node;
630 struct neigh_node *router = NULL, *router_router = NULL;
631 struct neigh_node *orig_neigh_router = NULL;
606 char has_directlink_flag; 632 char has_directlink_flag;
607 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 633 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
608 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; 634 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
@@ -747,14 +773,15 @@ void receive_bat_packet(struct ethhdr *ethhdr,
747 goto out; 773 goto out;
748 } 774 }
749 775
776 router = orig_node_get_router(orig_node);
777 if (router)
778 router_router = orig_node_get_router(router->orig_node);
779
750 /* avoid temporary routing loops */ 780 /* avoid temporary routing loops */
751 if ((orig_node->router) && 781 if (router && router_router &&
752 (orig_node->router->orig_node->router) && 782 (compare_eth(router->addr, batman_packet->prev_sender)) &&
753 (compare_eth(orig_node->router->addr,
754 batman_packet->prev_sender)) &&
755 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) && 783 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
756 (compare_eth(orig_node->router->addr, 784 (compare_eth(router->addr, router_router->addr))) {
757 orig_node->router->orig_node->router->addr))) {
758 bat_dbg(DBG_BATMAN, bat_priv, 785 bat_dbg(DBG_BATMAN, bat_priv,
759 "Drop packet: ignoring all rebroadcast packets that " 786 "Drop packet: ignoring all rebroadcast packets that "
760 "may make me loop (sender: %pM)\n", ethhdr->h_source); 787 "may make me loop (sender: %pM)\n", ethhdr->h_source);
@@ -769,9 +796,11 @@ void receive_bat_packet(struct ethhdr *ethhdr,
769 if (!orig_neigh_node) 796 if (!orig_neigh_node)
770 goto out; 797 goto out;
771 798
799 orig_neigh_router = orig_node_get_router(orig_neigh_node);
800
772 /* drop packet if sender is not a direct neighbor and if we 801 /* drop packet if sender is not a direct neighbor and if we
773 * don't route towards it */ 802 * don't route towards it */
774 if (!is_single_hop_neigh && (!orig_neigh_node->router)) { 803 if (!is_single_hop_neigh && (!orig_neigh_router)) {
775 bat_dbg(DBG_BATMAN, bat_priv, 804 bat_dbg(DBG_BATMAN, bat_priv,
776 "Drop packet: OGM via unknown neighbor!\n"); 805 "Drop packet: OGM via unknown neighbor!\n");
777 goto out_neigh; 806 goto out_neigh;
@@ -825,6 +854,13 @@ out_neigh:
825 if ((orig_neigh_node) && (!is_single_hop_neigh)) 854 if ((orig_neigh_node) && (!is_single_hop_neigh))
826 orig_node_free_ref(orig_neigh_node); 855 orig_node_free_ref(orig_neigh_node);
827out: 856out:
857 if (router)
858 neigh_node_free_ref(router);
859 if (router_router)
860 neigh_node_free_ref(router_router);
861 if (orig_neigh_router)
862 neigh_node_free_ref(orig_neigh_router);
863
828 orig_node_free_ref(orig_node); 864 orig_node_free_ref(orig_node);
829} 865}
830 866
@@ -868,8 +904,9 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
868static int recv_my_icmp_packet(struct bat_priv *bat_priv, 904static int recv_my_icmp_packet(struct bat_priv *bat_priv,
869 struct sk_buff *skb, size_t icmp_len) 905 struct sk_buff *skb, size_t icmp_len)
870{ 906{
907 struct hard_iface *primary_if = NULL;
871 struct orig_node *orig_node = NULL; 908 struct orig_node *orig_node = NULL;
872 struct neigh_node *neigh_node = NULL; 909 struct neigh_node *router = NULL;
873 struct icmp_packet_rr *icmp_packet; 910 struct icmp_packet_rr *icmp_packet;
874 int ret = NET_RX_DROP; 911 int ret = NET_RX_DROP;
875 912
@@ -881,28 +918,19 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
881 goto out; 918 goto out;
882 } 919 }
883 920
884 if (!bat_priv->primary_if) 921 primary_if = primary_if_get_selected(bat_priv);
922 if (!primary_if)
885 goto out; 923 goto out;
886 924
887 /* answer echo request (ping) */ 925 /* answer echo request (ping) */
888 /* get routing information */ 926 /* get routing information */
889 rcu_read_lock();
890 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 927 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
891
892 if (!orig_node) 928 if (!orig_node)
893 goto unlock; 929 goto out;
894
895 neigh_node = orig_node->router;
896
897 if (!neigh_node)
898 goto unlock;
899
900 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
901 neigh_node = NULL;
902 goto unlock;
903 }
904 930
905 rcu_read_unlock(); 931 router = orig_node_get_router(orig_node);
932 if (!router)
933 goto out;
906 934
907 /* create a copy of the skb, if needed, to modify it. */ 935 /* create a copy of the skb, if needed, to modify it. */
908 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 936 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -911,20 +939,18 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
911 icmp_packet = (struct icmp_packet_rr *)skb->data; 939 icmp_packet = (struct icmp_packet_rr *)skb->data;
912 940
913 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 941 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
914 memcpy(icmp_packet->orig, 942 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
915 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
916 icmp_packet->msg_type = ECHO_REPLY; 943 icmp_packet->msg_type = ECHO_REPLY;
917 icmp_packet->ttl = TTL; 944 icmp_packet->ttl = TTL;
918 945
919 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 946 send_skb_packet(skb, router->if_incoming, router->addr);
920 ret = NET_RX_SUCCESS; 947 ret = NET_RX_SUCCESS;
921 goto out;
922 948
923unlock:
924 rcu_read_unlock();
925out: 949out:
926 if (neigh_node) 950 if (primary_if)
927 neigh_node_free_ref(neigh_node); 951 hardif_free_ref(primary_if);
952 if (router)
953 neigh_node_free_ref(router);
928 if (orig_node) 954 if (orig_node)
929 orig_node_free_ref(orig_node); 955 orig_node_free_ref(orig_node);
930 return ret; 956 return ret;
@@ -933,8 +959,9 @@ out:
933static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, 959static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
934 struct sk_buff *skb) 960 struct sk_buff *skb)
935{ 961{
962 struct hard_iface *primary_if = NULL;
936 struct orig_node *orig_node = NULL; 963 struct orig_node *orig_node = NULL;
937 struct neigh_node *neigh_node = NULL; 964 struct neigh_node *router = NULL;
938 struct icmp_packet *icmp_packet; 965 struct icmp_packet *icmp_packet;
939 int ret = NET_RX_DROP; 966 int ret = NET_RX_DROP;
940 967
@@ -948,27 +975,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
948 goto out; 975 goto out;
949 } 976 }
950 977
951 if (!bat_priv->primary_if) 978 primary_if = primary_if_get_selected(bat_priv);
979 if (!primary_if)
952 goto out; 980 goto out;
953 981
954 /* get routing information */ 982 /* get routing information */
955 rcu_read_lock();
956 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 983 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
957
958 if (!orig_node) 984 if (!orig_node)
959 goto unlock; 985 goto out;
960
961 neigh_node = orig_node->router;
962
963 if (!neigh_node)
964 goto unlock;
965
966 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
967 neigh_node = NULL;
968 goto unlock;
969 }
970 986
971 rcu_read_unlock(); 987 router = orig_node_get_router(orig_node);
988 if (!router)
989 goto out;
972 990
973 /* create a copy of the skb, if needed, to modify it. */ 991 /* create a copy of the skb, if needed, to modify it. */
974 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 992 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -977,20 +995,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
977 icmp_packet = (struct icmp_packet *)skb->data; 995 icmp_packet = (struct icmp_packet *)skb->data;
978 996
979 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 997 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
980 memcpy(icmp_packet->orig, 998 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
981 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
982 icmp_packet->msg_type = TTL_EXCEEDED; 999 icmp_packet->msg_type = TTL_EXCEEDED;
983 icmp_packet->ttl = TTL; 1000 icmp_packet->ttl = TTL;
984 1001
985 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1002 send_skb_packet(skb, router->if_incoming, router->addr);
986 ret = NET_RX_SUCCESS; 1003 ret = NET_RX_SUCCESS;
987 goto out;
988 1004
989unlock:
990 rcu_read_unlock();
991out: 1005out:
992 if (neigh_node) 1006 if (primary_if)
993 neigh_node_free_ref(neigh_node); 1007 hardif_free_ref(primary_if);
1008 if (router)
1009 neigh_node_free_ref(router);
994 if (orig_node) 1010 if (orig_node)
995 orig_node_free_ref(orig_node); 1011 orig_node_free_ref(orig_node);
996 return ret; 1012 return ret;
@@ -1003,7 +1019,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1003 struct icmp_packet_rr *icmp_packet; 1019 struct icmp_packet_rr *icmp_packet;
1004 struct ethhdr *ethhdr; 1020 struct ethhdr *ethhdr;
1005 struct orig_node *orig_node = NULL; 1021 struct orig_node *orig_node = NULL;
1006 struct neigh_node *neigh_node = NULL; 1022 struct neigh_node *router = NULL;
1007 int hdr_size = sizeof(struct icmp_packet); 1023 int hdr_size = sizeof(struct icmp_packet);
1008 int ret = NET_RX_DROP; 1024 int ret = NET_RX_DROP;
1009 1025
@@ -1050,23 +1066,13 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1050 return recv_icmp_ttl_exceeded(bat_priv, skb); 1066 return recv_icmp_ttl_exceeded(bat_priv, skb);
1051 1067
1052 /* get routing information */ 1068 /* get routing information */
1053 rcu_read_lock();
1054 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 1069 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
1055
1056 if (!orig_node) 1070 if (!orig_node)
1057 goto unlock; 1071 goto out;
1058
1059 neigh_node = orig_node->router;
1060
1061 if (!neigh_node)
1062 goto unlock;
1063
1064 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
1065 neigh_node = NULL;
1066 goto unlock;
1067 }
1068 1072
1069 rcu_read_unlock(); 1073 router = orig_node_get_router(orig_node);
1074 if (!router)
1075 goto out;
1070 1076
1071 /* create a copy of the skb, if needed, to modify it. */ 1077 /* create a copy of the skb, if needed, to modify it. */
1072 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 1078 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -1078,20 +1084,117 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1078 icmp_packet->ttl--; 1084 icmp_packet->ttl--;
1079 1085
1080 /* route it */ 1086 /* route it */
1081 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1087 send_skb_packet(skb, router->if_incoming, router->addr);
1082 ret = NET_RX_SUCCESS; 1088 ret = NET_RX_SUCCESS;
1083 goto out;
1084 1089
1085unlock:
1086 rcu_read_unlock();
1087out: 1090out:
1088 if (neigh_node) 1091 if (router)
1089 neigh_node_free_ref(neigh_node); 1092 neigh_node_free_ref(router);
1090 if (orig_node) 1093 if (orig_node)
1091 orig_node_free_ref(orig_node); 1094 orig_node_free_ref(orig_node);
1092 return ret; 1095 return ret;
1093} 1096}
1094 1097
1098/* In the bonding case, send the packets in a round
1099 * robin fashion over the remaining interfaces.
1100 *
1101 * This method rotates the bonding list and increases the
1102 * returned router's refcount. */
1103static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
1104 struct hard_iface *recv_if)
1105{
1106 struct neigh_node *tmp_neigh_node;
1107 struct neigh_node *router = NULL, *first_candidate = NULL;
1108
1109 rcu_read_lock();
1110 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1111 bonding_list) {
1112 if (!first_candidate)
1113 first_candidate = tmp_neigh_node;
1114
1115 /* recv_if == NULL on the first node. */
1116 if (tmp_neigh_node->if_incoming == recv_if)
1117 continue;
1118
1119 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1120 continue;
1121
1122 router = tmp_neigh_node;
1123 break;
1124 }
1125
1126 /* use the first candidate if nothing was found. */
1127 if (!router && first_candidate &&
1128 atomic_inc_not_zero(&first_candidate->refcount))
1129 router = first_candidate;
1130
1131 if (!router)
1132 goto out;
1133
1134 /* selected should point to the next element
1135 * after the current router */
1136 spin_lock_bh(&primary_orig->neigh_list_lock);
1137 /* this is a list_move(), which unfortunately
1138 * does not exist as rcu version */
1139 list_del_rcu(&primary_orig->bond_list);
1140 list_add_rcu(&primary_orig->bond_list,
1141 &router->bonding_list);
1142 spin_unlock_bh(&primary_orig->neigh_list_lock);
1143
1144out:
1145 rcu_read_unlock();
1146 return router;
1147}
1148
1149/* Interface Alternating: Use the best of the
1150 * remaining candidates which are not using
1151 * this interface.
1152 *
1153 * Increases the returned router's refcount */
1154static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1155 struct hard_iface *recv_if)
1156{
1157 struct neigh_node *tmp_neigh_node;
1158 struct neigh_node *router = NULL, *first_candidate = NULL;
1159
1160 rcu_read_lock();
1161 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1162 bonding_list) {
1163 if (!first_candidate)
1164 first_candidate = tmp_neigh_node;
1165
1166 /* recv_if == NULL on the first node. */
1167 if (tmp_neigh_node->if_incoming == recv_if)
1168 continue;
1169
1170 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1171 continue;
1172
1173 /* if we don't have a router yet
1174 * or this one is better, choose it. */
1175 if ((!router) ||
1176 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1177 /* decrement refcount of
1178 * previously selected router */
1179 if (router)
1180 neigh_node_free_ref(router);
1181
1182 router = tmp_neigh_node;
1183 atomic_inc_not_zero(&router->refcount);
1184 }
1185
1186 neigh_node_free_ref(tmp_neigh_node);
1187 }
1188
1189 /* use the first candidate if nothing was found. */
1190 if (!router && first_candidate &&
1191 atomic_inc_not_zero(&first_candidate->refcount))
1192 router = first_candidate;
1193
1194 rcu_read_unlock();
1195 return router;
1196}
1197
1095/* find a suitable router for this originator, and use 1198/* find a suitable router for this originator, and use
1096 * bonding if possible. increases the found neighbors 1199 * bonding if possible. increases the found neighbors
1097 * refcount.*/ 1200 * refcount.*/
@@ -1101,14 +1204,15 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1101{ 1204{
1102 struct orig_node *primary_orig_node; 1205 struct orig_node *primary_orig_node;
1103 struct orig_node *router_orig; 1206 struct orig_node *router_orig;
1104 struct neigh_node *router, *first_candidate, *tmp_neigh_node; 1207 struct neigh_node *router;
1105 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 1208 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1106 int bonding_enabled; 1209 int bonding_enabled;
1107 1210
1108 if (!orig_node) 1211 if (!orig_node)
1109 return NULL; 1212 return NULL;
1110 1213
1111 if (!orig_node->router) 1214 router = orig_node_get_router(orig_node);
1215 if (!router)
1112 return NULL; 1216 return NULL;
1113 1217
1114 /* without bonding, the first node should 1218 /* without bonding, the first node should
@@ -1117,9 +1221,8 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1117 1221
1118 rcu_read_lock(); 1222 rcu_read_lock();
1119 /* select default router to output */ 1223 /* select default router to output */
1120 router = orig_node->router; 1224 router_orig = router->orig_node;
1121 router_orig = orig_node->router->orig_node; 1225 if (!router_orig) {
1122 if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
1123 rcu_read_unlock(); 1226 rcu_read_unlock();
1124 return NULL; 1227 return NULL;
1125 } 1228 }
@@ -1151,88 +1254,17 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1151 if (atomic_read(&primary_orig_node->bond_candidates) < 2) 1254 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
1152 goto return_router; 1255 goto return_router;
1153 1256
1154
1155 /* all nodes between should choose a candidate which 1257 /* all nodes between should choose a candidate which
1156 * is is not on the interface where the packet came 1258 * is is not on the interface where the packet came
1157 * in. */ 1259 * in. */
1158 1260
1159 neigh_node_free_ref(router); 1261 neigh_node_free_ref(router);
1160 first_candidate = NULL;
1161 router = NULL;
1162
1163 if (bonding_enabled) {
1164 /* in the bonding case, send the packets in a round
1165 * robin fashion over the remaining interfaces. */
1166
1167 list_for_each_entry_rcu(tmp_neigh_node,
1168 &primary_orig_node->bond_list, bonding_list) {
1169 if (!first_candidate)
1170 first_candidate = tmp_neigh_node;
1171 /* recv_if == NULL on the first node. */
1172 if (tmp_neigh_node->if_incoming != recv_if &&
1173 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
1174 router = tmp_neigh_node;
1175 break;
1176 }
1177 }
1178
1179 /* use the first candidate if nothing was found. */
1180 if (!router && first_candidate &&
1181 atomic_inc_not_zero(&first_candidate->refcount))
1182 router = first_candidate;
1183
1184 if (!router) {
1185 rcu_read_unlock();
1186 return NULL;
1187 }
1188
1189 /* selected should point to the next element
1190 * after the current router */
1191 spin_lock_bh(&primary_orig_node->neigh_list_lock);
1192 /* this is a list_move(), which unfortunately
1193 * does not exist as rcu version */
1194 list_del_rcu(&primary_orig_node->bond_list);
1195 list_add_rcu(&primary_orig_node->bond_list,
1196 &router->bonding_list);
1197 spin_unlock_bh(&primary_orig_node->neigh_list_lock);
1198
1199 } else {
1200 /* if bonding is disabled, use the best of the
1201 * remaining candidates which are not using
1202 * this interface. */
1203 list_for_each_entry_rcu(tmp_neigh_node,
1204 &primary_orig_node->bond_list, bonding_list) {
1205 if (!first_candidate)
1206 first_candidate = tmp_neigh_node;
1207
1208 /* recv_if == NULL on the first node. */
1209 if (tmp_neigh_node->if_incoming == recv_if)
1210 continue;
1211
1212 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1213 continue;
1214
1215 /* if we don't have a router yet
1216 * or this one is better, choose it. */
1217 if ((!router) ||
1218 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1219 /* decrement refcount of
1220 * previously selected router */
1221 if (router)
1222 neigh_node_free_ref(router);
1223 1262
1224 router = tmp_neigh_node; 1263 if (bonding_enabled)
1225 atomic_inc_not_zero(&router->refcount); 1264 router = find_bond_router(primary_orig_node, recv_if);
1226 } 1265 else
1227 1266 router = find_ifalter_router(primary_orig_node, recv_if);
1228 neigh_node_free_ref(tmp_neigh_node);
1229 }
1230 1267
1231 /* use the first candidate if nothing was found. */
1232 if (!router && first_candidate &&
1233 atomic_inc_not_zero(&first_candidate->refcount))
1234 router = first_candidate;
1235 }
1236return_router: 1268return_router:
1237 rcu_read_unlock(); 1269 rcu_read_unlock();
1238 return router; 1270 return router;
@@ -1284,13 +1316,10 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1284 } 1316 }
1285 1317
1286 /* get routing information */ 1318 /* get routing information */
1287 rcu_read_lock();
1288 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 1319 orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
1289 1320
1290 if (!orig_node) 1321 if (!orig_node)
1291 goto unlock; 1322 goto out;
1292
1293 rcu_read_unlock();
1294 1323
1295 /* find_router() increases neigh_nodes refcount if found. */ 1324 /* find_router() increases neigh_nodes refcount if found. */
1296 neigh_node = find_router(bat_priv, orig_node, recv_if); 1325 neigh_node = find_router(bat_priv, orig_node, recv_if);
@@ -1336,10 +1365,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1336 /* route it */ 1365 /* route it */
1337 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1366 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1338 ret = NET_RX_SUCCESS; 1367 ret = NET_RX_SUCCESS;
1339 goto out;
1340 1368
1341unlock:
1342 rcu_read_unlock();
1343out: 1369out:
1344 if (neigh_node) 1370 if (neigh_node)
1345 neigh_node_free_ref(neigh_node); 1371 neigh_node_free_ref(neigh_node);
@@ -1438,13 +1464,10 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1438 if (bcast_packet->ttl < 2) 1464 if (bcast_packet->ttl < 2)
1439 goto out; 1465 goto out;
1440 1466
1441 rcu_read_lock();
1442 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1467 orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
1443 1468
1444 if (!orig_node) 1469 if (!orig_node)
1445 goto rcu_unlock; 1470 goto out;
1446
1447 rcu_read_unlock();
1448 1471
1449 spin_lock_bh(&orig_node->bcast_seqno_lock); 1472 spin_lock_bh(&orig_node->bcast_seqno_lock);
1450 1473
@@ -1475,9 +1498,6 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1475 ret = NET_RX_SUCCESS; 1498 ret = NET_RX_SUCCESS;
1476 goto out; 1499 goto out;
1477 1500
1478rcu_unlock:
1479 rcu_read_unlock();
1480 goto out;
1481spin_unlock: 1501spin_unlock:
1482 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1502 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1483out: 1503out:
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d932af..02b541a6dfef 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
244void schedule_own_packet(struct hard_iface *hard_iface) 244void schedule_own_packet(struct hard_iface *hard_iface)
245{ 245{
246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
247 struct hard_iface *primary_if;
247 unsigned long send_time; 248 unsigned long send_time;
248 struct batman_packet *batman_packet; 249 struct batman_packet *batman_packet;
249 int vis_server; 250 int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
253 return; 254 return;
254 255
255 vis_server = atomic_read(&bat_priv->vis_mode); 256 vis_server = atomic_read(&bat_priv->vis_mode);
257 primary_if = primary_if_get_selected(bat_priv);
256 258
257 /** 259 /**
258 * the interface gets activated here to avoid race conditions between 260 * the interface gets activated here to avoid race conditions between
@@ -266,7 +268,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
266 268
267 /* if local hna has changed and interface is a primary interface */ 269 /* if local hna has changed and interface is a primary interface */
268 if ((atomic_read(&bat_priv->hna_local_changed)) && 270 if ((atomic_read(&bat_priv->hna_local_changed)) &&
269 (hard_iface == bat_priv->primary_if)) 271 (hard_iface == primary_if))
270 rebuild_batman_packet(bat_priv, hard_iface); 272 rebuild_batman_packet(bat_priv, hard_iface);
271 273
272 /** 274 /**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
284 else 286 else
285 batman_packet->flags &= ~VIS_SERVER; 287 batman_packet->flags &= ~VIS_SERVER;
286 288
287 if ((hard_iface == bat_priv->primary_if) && 289 if ((hard_iface == primary_if) &&
288 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) 290 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
289 batman_packet->gw_flags = 291 batman_packet->gw_flags =
290 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 292 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,6 +301,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
299 hard_iface->packet_buff, 301 hard_iface->packet_buff,
300 hard_iface->packet_len, 302 hard_iface->packet_len,
301 hard_iface, 1, send_time); 303 hard_iface, 1, send_time);
304
305 if (primary_if)
306 hardif_free_ref(primary_if);
302} 307}
303 308
304void schedule_forward_packet(struct orig_node *orig_node, 309void schedule_forward_packet(struct orig_node *orig_node,
@@ -308,6 +313,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
308 struct hard_iface *if_incoming) 313 struct hard_iface *if_incoming)
309{ 314{
310 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct neigh_node *router;
311 unsigned char in_tq, in_ttl, tq_avg = 0; 317 unsigned char in_tq, in_ttl, tq_avg = 0;
312 unsigned long send_time; 318 unsigned long send_time;
313 319
@@ -316,6 +322,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
316 return; 322 return;
317 } 323 }
318 324
325 router = orig_node_get_router(orig_node);
326
319 in_tq = batman_packet->tq; 327 in_tq = batman_packet->tq;
320 in_ttl = batman_packet->ttl; 328 in_ttl = batman_packet->ttl;
321 329
@@ -324,20 +332,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
324 332
325 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast 333 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
326 * of our best tq value */ 334 * of our best tq value */
327 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { 335 if (router && router->tq_avg != 0) {
328 336
329 /* rebroadcast ogm of best ranking neighbor as is */ 337 /* rebroadcast ogm of best ranking neighbor as is */
330 if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) { 338 if (!compare_eth(router->addr, ethhdr->h_source)) {
331 batman_packet->tq = orig_node->router->tq_avg; 339 batman_packet->tq = router->tq_avg;
332 340
333 if (orig_node->router->last_ttl) 341 if (router->last_ttl)
334 batman_packet->ttl = orig_node->router->last_ttl 342 batman_packet->ttl = router->last_ttl - 1;
335 - 1;
336 } 343 }
337 344
338 tq_avg = orig_node->router->tq_avg; 345 tq_avg = router->tq_avg;
339 } 346 }
340 347
348 if (router)
349 neigh_node_free_ref(router);
350
341 /* apply hop penalty */ 351 /* apply hop penalty */
342 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv); 352 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
343 353
@@ -388,7 +398,6 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
388 send_time); 398 send_time);
389} 399}
390 400
391#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
392/* add a broadcast packet to the queue and setup timers. broadcast packets 401/* add a broadcast packet to the queue and setup timers. broadcast packets
393 * are sent multiple times to increase probability for beeing received. 402 * are sent multiple times to increase probability for beeing received.
394 * 403 *
@@ -399,6 +408,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
399 * skb is freed. */ 408 * skb is freed. */
400int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb) 409int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
401{ 410{
411 struct hard_iface *primary_if = NULL;
402 struct forw_packet *forw_packet; 412 struct forw_packet *forw_packet;
403 struct bcast_packet *bcast_packet; 413 struct bcast_packet *bcast_packet;
404 414
@@ -407,7 +417,8 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
407 goto out; 417 goto out;
408 } 418 }
409 419
410 if (!bat_priv->primary_if) 420 primary_if = primary_if_get_selected(bat_priv);
421 if (!primary_if)
411 goto out; 422 goto out;
412 423
413 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); 424 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
@@ -426,7 +437,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
426 skb_reset_mac_header(skb); 437 skb_reset_mac_header(skb);
427 438
428 forw_packet->skb = skb; 439 forw_packet->skb = skb;
429 forw_packet->if_incoming = bat_priv->primary_if; 440 forw_packet->if_incoming = primary_if;
430 441
431 /* how often did we send the bcast packet ? */ 442 /* how often did we send the bcast packet ? */
432 forw_packet->num_packets = 0; 443 forw_packet->num_packets = 0;
@@ -439,6 +450,8 @@ packet_free:
439out_and_inc: 450out_and_inc:
440 atomic_inc(&bat_priv->bcast_queue_left); 451 atomic_inc(&bat_priv->bcast_queue_left);
441out: 452out:
453 if (primary_if)
454 hardif_free_ref(primary_if);
442 return NETDEV_TX_BUSY; 455 return NETDEV_TX_BUSY;
443} 456}
444 457
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 824e1f6e50f2..9e5fcd1596cf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -43,8 +43,6 @@ static void bat_get_drvinfo(struct net_device *dev,
43static u32 bat_get_msglevel(struct net_device *dev); 43static u32 bat_get_msglevel(struct net_device *dev);
44static void bat_set_msglevel(struct net_device *dev, u32 value); 44static void bat_set_msglevel(struct net_device *dev, u32 value);
45static u32 bat_get_link(struct net_device *dev); 45static u32 bat_get_link(struct net_device *dev);
46static u32 bat_get_rx_csum(struct net_device *dev);
47static int bat_set_rx_csum(struct net_device *dev, u32 data);
48 46
49static const struct ethtool_ops bat_ethtool_ops = { 47static const struct ethtool_ops bat_ethtool_ops = {
50 .get_settings = bat_get_settings, 48 .get_settings = bat_get_settings,
@@ -52,8 +50,6 @@ static const struct ethtool_ops bat_ethtool_ops = {
52 .get_msglevel = bat_get_msglevel, 50 .get_msglevel = bat_get_msglevel,
53 .set_msglevel = bat_set_msglevel, 51 .set_msglevel = bat_set_msglevel,
54 .get_link = bat_get_link, 52 .get_link = bat_get_link,
55 .get_rx_csum = bat_get_rx_csum,
56 .set_rx_csum = bat_set_rx_csum
57}; 53};
58 54
59int my_skb_head_push(struct sk_buff *skb, unsigned int len) 55int my_skb_head_push(struct sk_buff *skb, unsigned int len)
@@ -90,10 +86,51 @@ static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
90 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); 86 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
91} 87}
92 88
89static struct softif_neigh *softif_neigh_get_selected(struct bat_priv *bat_priv)
90{
91 struct softif_neigh *neigh;
92
93 rcu_read_lock();
94 neigh = rcu_dereference(bat_priv->softif_neigh);
95
96 if (neigh && !atomic_inc_not_zero(&neigh->refcount))
97 neigh = NULL;
98
99 rcu_read_unlock();
100 return neigh;
101}
102
103static void softif_neigh_select(struct bat_priv *bat_priv,
104 struct softif_neigh *new_neigh)
105{
106 struct softif_neigh *curr_neigh;
107
108 spin_lock_bh(&bat_priv->softif_neigh_lock);
109
110 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
111 new_neigh = NULL;
112
113 curr_neigh = bat_priv->softif_neigh;
114 rcu_assign_pointer(bat_priv->softif_neigh, new_neigh);
115
116 if (curr_neigh)
117 softif_neigh_free_ref(curr_neigh);
118
119 spin_unlock_bh(&bat_priv->softif_neigh_lock);
120}
121
122static void softif_neigh_deselect(struct bat_priv *bat_priv)
123{
124 softif_neigh_select(bat_priv, NULL);
125}
126
93void softif_neigh_purge(struct bat_priv *bat_priv) 127void softif_neigh_purge(struct bat_priv *bat_priv)
94{ 128{
95 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 129 struct softif_neigh *softif_neigh, *curr_softif_neigh;
96 struct hlist_node *node, *node_tmp; 130 struct hlist_node *node, *node_tmp;
131 char do_deselect = 0;
132
133 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
97 134
98 spin_lock_bh(&bat_priv->softif_neigh_lock); 135 spin_lock_bh(&bat_priv->softif_neigh_lock);
99 136
@@ -105,22 +142,26 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
105 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) 142 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
106 continue; 143 continue;
107 144
108 hlist_del_rcu(&softif_neigh->list); 145 if (curr_softif_neigh == softif_neigh) {
109
110 if (bat_priv->softif_neigh == softif_neigh) {
111 bat_dbg(DBG_ROUTES, bat_priv, 146 bat_dbg(DBG_ROUTES, bat_priv,
112 "Current mesh exit point '%pM' vanished " 147 "Current mesh exit point '%pM' vanished "
113 "(vid: %d).\n", 148 "(vid: %d).\n",
114 softif_neigh->addr, softif_neigh->vid); 149 softif_neigh->addr, softif_neigh->vid);
115 softif_neigh_tmp = bat_priv->softif_neigh; 150 do_deselect = 1;
116 bat_priv->softif_neigh = NULL;
117 softif_neigh_free_ref(softif_neigh_tmp);
118 } 151 }
119 152
153 hlist_del_rcu(&softif_neigh->list);
120 softif_neigh_free_ref(softif_neigh); 154 softif_neigh_free_ref(softif_neigh);
121 } 155 }
122 156
123 spin_unlock_bh(&bat_priv->softif_neigh_lock); 157 spin_unlock_bh(&bat_priv->softif_neigh_lock);
158
159 /* soft_neigh_deselect() needs to acquire the softif_neigh_lock */
160 if (do_deselect)
161 softif_neigh_deselect(bat_priv);
162
163 if (curr_softif_neigh)
164 softif_neigh_free_ref(curr_softif_neigh);
124} 165}
125 166
126static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, 167static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
@@ -170,26 +211,44 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
170 struct net_device *net_dev = (struct net_device *)seq->private; 211 struct net_device *net_dev = (struct net_device *)seq->private;
171 struct bat_priv *bat_priv = netdev_priv(net_dev); 212 struct bat_priv *bat_priv = netdev_priv(net_dev);
172 struct softif_neigh *softif_neigh; 213 struct softif_neigh *softif_neigh;
214 struct hard_iface *primary_if;
173 struct hlist_node *node; 215 struct hlist_node *node;
216 struct softif_neigh *curr_softif_neigh;
217 int ret = 0;
218
219 primary_if = primary_if_get_selected(bat_priv);
220 if (!primary_if) {
221 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
222 "please specify interfaces to enable it\n",
223 net_dev->name);
224 goto out;
225 }
174 226
175 if (!bat_priv->primary_if) { 227 if (primary_if->if_status != IF_ACTIVE) {
176 return seq_printf(seq, "BATMAN mesh %s disabled - " 228 ret = seq_printf(seq, "BATMAN mesh %s "
177 "please specify interfaces to enable it\n", 229 "disabled - primary interface not active\n",
178 net_dev->name); 230 net_dev->name);
231 goto out;
179 } 232 }
180 233
181 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); 234 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
182 235
236 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
183 rcu_read_lock(); 237 rcu_read_lock();
184 hlist_for_each_entry_rcu(softif_neigh, node, 238 hlist_for_each_entry_rcu(softif_neigh, node,
185 &bat_priv->softif_neigh_list, list) 239 &bat_priv->softif_neigh_list, list)
186 seq_printf(seq, "%s %pM (vid: %d)\n", 240 seq_printf(seq, "%s %pM (vid: %d)\n",
187 bat_priv->softif_neigh == softif_neigh 241 curr_softif_neigh == softif_neigh
188 ? "=>" : " ", softif_neigh->addr, 242 ? "=>" : " ", softif_neigh->addr,
189 softif_neigh->vid); 243 softif_neigh->vid);
190 rcu_read_unlock(); 244 rcu_read_unlock();
245 if (curr_softif_neigh)
246 softif_neigh_free_ref(curr_softif_neigh);
191 247
192 return 0; 248out:
249 if (primary_if)
250 hardif_free_ref(primary_if);
251 return ret;
193} 252}
194 253
195static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, 254static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
@@ -198,7 +257,9 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
198 struct bat_priv *bat_priv = netdev_priv(dev); 257 struct bat_priv *bat_priv = netdev_priv(dev);
199 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 258 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
200 struct batman_packet *batman_packet; 259 struct batman_packet *batman_packet;
201 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 260 struct softif_neigh *softif_neigh = NULL;
261 struct hard_iface *primary_if = NULL;
262 struct softif_neigh *curr_softif_neigh = NULL;
202 263
203 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 264 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
204 batman_packet = (struct batman_packet *) 265 batman_packet = (struct batman_packet *)
@@ -207,63 +268,72 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
207 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN); 268 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
208 269
209 if (batman_packet->version != COMPAT_VERSION) 270 if (batman_packet->version != COMPAT_VERSION)
210 goto err; 271 goto out;
211 272
212 if (batman_packet->packet_type != BAT_PACKET) 273 if (batman_packet->packet_type != BAT_PACKET)
213 goto err; 274 goto out;
214 275
215 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 276 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
216 goto err; 277 goto out;
217 278
218 if (is_my_mac(batman_packet->orig)) 279 if (is_my_mac(batman_packet->orig))
219 goto err; 280 goto out;
220 281
221 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid); 282 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
222
223 if (!softif_neigh) 283 if (!softif_neigh)
224 goto err; 284 goto out;
285
286 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
287 if (!curr_softif_neigh)
288 goto out;
289
290 if (curr_softif_neigh == softif_neigh)
291 goto out;
225 292
226 if (bat_priv->softif_neigh == softif_neigh) 293 primary_if = primary_if_get_selected(bat_priv);
294 if (!primary_if)
227 goto out; 295 goto out;
228 296
229 /* we got a neighbor but its mac is 'bigger' than ours */ 297 /* we got a neighbor but its mac is 'bigger' than ours */
230 if (memcmp(bat_priv->primary_if->net_dev->dev_addr, 298 if (memcmp(primary_if->net_dev->dev_addr,
231 softif_neigh->addr, ETH_ALEN) < 0) 299 softif_neigh->addr, ETH_ALEN) < 0)
232 goto out; 300 goto out;
233 301
234 /* switch to new 'smallest neighbor' */ 302 /* switch to new 'smallest neighbor' */
235 if ((bat_priv->softif_neigh) && 303 if ((curr_softif_neigh) &&
236 (memcmp(softif_neigh->addr, bat_priv->softif_neigh->addr, 304 (memcmp(softif_neigh->addr, curr_softif_neigh->addr,
237 ETH_ALEN) < 0)) { 305 ETH_ALEN) < 0)) {
238 bat_dbg(DBG_ROUTES, bat_priv, 306 bat_dbg(DBG_ROUTES, bat_priv,
239 "Changing mesh exit point from %pM (vid: %d) " 307 "Changing mesh exit point from %pM (vid: %d) "
240 "to %pM (vid: %d).\n", 308 "to %pM (vid: %d).\n",
241 bat_priv->softif_neigh->addr, 309 curr_softif_neigh->addr,
242 bat_priv->softif_neigh->vid, 310 curr_softif_neigh->vid,
243 softif_neigh->addr, softif_neigh->vid); 311 softif_neigh->addr, softif_neigh->vid);
244 softif_neigh_tmp = bat_priv->softif_neigh; 312
245 bat_priv->softif_neigh = softif_neigh; 313 softif_neigh_select(bat_priv, softif_neigh);
246 softif_neigh_free_ref(softif_neigh_tmp); 314 goto out;
247 /* we need to hold the additional reference */
248 goto err;
249 } 315 }
250 316
251 /* close own batX device and use softif_neigh as exit node */ 317 /* close own batX device and use softif_neigh as exit node */
252 if ((!bat_priv->softif_neigh) && 318 if ((!curr_softif_neigh) &&
253 (memcmp(softif_neigh->addr, 319 (memcmp(softif_neigh->addr,
254 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) { 320 primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) {
255 bat_dbg(DBG_ROUTES, bat_priv, 321 bat_dbg(DBG_ROUTES, bat_priv,
256 "Setting mesh exit point to %pM (vid: %d).\n", 322 "Setting mesh exit point to %pM (vid: %d).\n",
257 softif_neigh->addr, softif_neigh->vid); 323 softif_neigh->addr, softif_neigh->vid);
258 bat_priv->softif_neigh = softif_neigh; 324
259 /* we need to hold the additional reference */ 325 softif_neigh_select(bat_priv, softif_neigh);
260 goto err; 326 goto out;
261 } 327 }
262 328
263out: 329out:
264 softif_neigh_free_ref(softif_neigh);
265err:
266 kfree_skb(skb); 330 kfree_skb(skb);
331 if (softif_neigh)
332 softif_neigh_free_ref(softif_neigh);
333 if (curr_softif_neigh)
334 softif_neigh_free_ref(curr_softif_neigh);
335 if (primary_if)
336 hardif_free_ref(primary_if);
267 return; 337 return;
268} 338}
269 339
@@ -319,8 +389,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
319{ 389{
320 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 390 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
321 struct bat_priv *bat_priv = netdev_priv(soft_iface); 391 struct bat_priv *bat_priv = netdev_priv(soft_iface);
392 struct hard_iface *primary_if = NULL;
322 struct bcast_packet *bcast_packet; 393 struct bcast_packet *bcast_packet;
323 struct vlan_ethhdr *vhdr; 394 struct vlan_ethhdr *vhdr;
395 struct softif_neigh *curr_softif_neigh = NULL;
324 int data_len = skb->len, ret; 396 int data_len = skb->len, ret;
325 short vid = -1; 397 short vid = -1;
326 bool do_bcast = false; 398 bool do_bcast = false;
@@ -348,7 +420,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
348 * if we have a another chosen mesh exit node in range 420 * if we have a another chosen mesh exit node in range
349 * it will transport the packets to the mesh 421 * it will transport the packets to the mesh
350 */ 422 */
351 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) 423 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
424 if ((curr_softif_neigh) && (curr_softif_neigh->vid == vid))
352 goto dropped; 425 goto dropped;
353 426
354 /* TODO: check this for locks */ 427 /* TODO: check this for locks */
@@ -366,7 +439,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
366 439
367 /* ethernet packet should be broadcasted */ 440 /* ethernet packet should be broadcasted */
368 if (do_bcast) { 441 if (do_bcast) {
369 if (!bat_priv->primary_if) 442 primary_if = primary_if_get_selected(bat_priv);
443 if (!primary_if)
370 goto dropped; 444 goto dropped;
371 445
372 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0) 446 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
@@ -382,7 +456,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
382 /* hw address of first interface is the orig mac because only 456 /* hw address of first interface is the orig mac because only
383 * this mac is known throughout the mesh */ 457 * this mac is known throughout the mesh */
384 memcpy(bcast_packet->orig, 458 memcpy(bcast_packet->orig,
385 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 459 primary_if->net_dev->dev_addr, ETH_ALEN);
386 460
387 /* set broadcast sequence number */ 461 /* set broadcast sequence number */
388 bcast_packet->seqno = 462 bcast_packet->seqno =
@@ -410,6 +484,10 @@ dropped:
410dropped_freed: 484dropped_freed:
411 bat_priv->stats.tx_dropped++; 485 bat_priv->stats.tx_dropped++;
412end: 486end:
487 if (curr_softif_neigh)
488 softif_neigh_free_ref(curr_softif_neigh);
489 if (primary_if)
490 hardif_free_ref(primary_if);
413 return NETDEV_TX_OK; 491 return NETDEV_TX_OK;
414} 492}
415 493
@@ -421,6 +499,7 @@ void interface_rx(struct net_device *soft_iface,
421 struct unicast_packet *unicast_packet; 499 struct unicast_packet *unicast_packet;
422 struct ethhdr *ethhdr; 500 struct ethhdr *ethhdr;
423 struct vlan_ethhdr *vhdr; 501 struct vlan_ethhdr *vhdr;
502 struct softif_neigh *curr_softif_neigh = NULL;
424 short vid = -1; 503 short vid = -1;
425 int ret; 504 int ret;
426 505
@@ -450,7 +529,8 @@ void interface_rx(struct net_device *soft_iface,
450 * if we have a another chosen mesh exit node in range 529 * if we have a another chosen mesh exit node in range
451 * it will transport the packets to the non-mesh network 530 * it will transport the packets to the non-mesh network
452 */ 531 */
453 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) { 532 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
533 if (curr_softif_neigh && (curr_softif_neigh->vid == vid)) {
454 skb_push(skb, hdr_size); 534 skb_push(skb, hdr_size);
455 unicast_packet = (struct unicast_packet *)skb->data; 535 unicast_packet = (struct unicast_packet *)skb->data;
456 536
@@ -461,7 +541,7 @@ void interface_rx(struct net_device *soft_iface,
461 skb_reset_mac_header(skb); 541 skb_reset_mac_header(skb);
462 542
463 memcpy(unicast_packet->dest, 543 memcpy(unicast_packet->dest,
464 bat_priv->softif_neigh->addr, ETH_ALEN); 544 curr_softif_neigh->addr, ETH_ALEN);
465 ret = route_unicast_packet(skb, recv_if); 545 ret = route_unicast_packet(skb, recv_if);
466 if (ret == NET_RX_DROP) 546 if (ret == NET_RX_DROP)
467 goto dropped; 547 goto dropped;
@@ -486,11 +566,13 @@ void interface_rx(struct net_device *soft_iface,
486 soft_iface->last_rx = jiffies; 566 soft_iface->last_rx = jiffies;
487 567
488 netif_rx(skb); 568 netif_rx(skb);
489 return; 569 goto out;
490 570
491dropped: 571dropped:
492 kfree_skb(skb); 572 kfree_skb(skb);
493out: 573out:
574 if (curr_softif_neigh)
575 softif_neigh_free_ref(curr_softif_neigh);
494 return; 576 return;
495} 577}
496 578
@@ -524,6 +606,7 @@ static void interface_setup(struct net_device *dev)
524 dev->hard_start_xmit = interface_tx; 606 dev->hard_start_xmit = interface_tx;
525#endif 607#endif
526 dev->destructor = free_netdev; 608 dev->destructor = free_netdev;
609 dev->tx_queue_len = 0;
527 610
528 /** 611 /**
529 * can't call min_mtu, because the needed variables 612 * can't call min_mtu, because the needed variables
@@ -640,7 +723,7 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
640{ 723{
641 cmd->supported = 0; 724 cmd->supported = 0;
642 cmd->advertising = 0; 725 cmd->advertising = 0;
643 cmd->speed = SPEED_10; 726 ethtool_cmd_speed_set(cmd, SPEED_10);
644 cmd->duplex = DUPLEX_FULL; 727 cmd->duplex = DUPLEX_FULL;
645 cmd->port = PORT_TP; 728 cmd->port = PORT_TP;
646 cmd->phy_address = 0; 729 cmd->phy_address = 0;
@@ -675,12 +758,3 @@ static u32 bat_get_link(struct net_device *dev)
675 return 1; 758 return 1;
676} 759}
677 760
678static u32 bat_get_rx_csum(struct net_device *dev)
679{
680 return 0;
681}
682
683static int bat_set_rx_csum(struct net_device *dev, u32 data)
684{
685 return -EOPNOTSUPP;
686}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8d15b48d1692..f931830d630e 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -22,6 +22,7 @@
22#include "main.h" 22#include "main.h"
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h"
25#include "hash.h" 26#include "hash.h"
26#include "originator.h" 27#include "originator.h"
27 28
@@ -237,16 +238,26 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
237 struct bat_priv *bat_priv = netdev_priv(net_dev); 238 struct bat_priv *bat_priv = netdev_priv(net_dev);
238 struct hashtable_t *hash = bat_priv->hna_local_hash; 239 struct hashtable_t *hash = bat_priv->hna_local_hash;
239 struct hna_local_entry *hna_local_entry; 240 struct hna_local_entry *hna_local_entry;
241 struct hard_iface *primary_if;
240 struct hlist_node *node; 242 struct hlist_node *node;
241 struct hlist_head *head; 243 struct hlist_head *head;
242 size_t buf_size, pos; 244 size_t buf_size, pos;
243 char *buff; 245 char *buff;
244 int i; 246 int i, ret = 0;
245 247
246 if (!bat_priv->primary_if) { 248 primary_if = primary_if_get_selected(bat_priv);
247 return seq_printf(seq, "BATMAN mesh %s disabled - " 249 if (!primary_if) {
248 "please specify interfaces to enable it\n", 250 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
249 net_dev->name); 251 "please specify interfaces to enable it\n",
252 net_dev->name);
253 goto out;
254 }
255
256 if (primary_if->if_status != IF_ACTIVE) {
257 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
258 "primary interface not active\n",
259 net_dev->name);
260 goto out;
250 } 261 }
251 262
252 seq_printf(seq, "Locally retrieved addresses (from %s) " 263 seq_printf(seq, "Locally retrieved addresses (from %s) "
@@ -269,7 +280,8 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
269 buff = kmalloc(buf_size, GFP_ATOMIC); 280 buff = kmalloc(buf_size, GFP_ATOMIC);
270 if (!buff) { 281 if (!buff) {
271 spin_unlock_bh(&bat_priv->hna_lhash_lock); 282 spin_unlock_bh(&bat_priv->hna_lhash_lock);
272 return -ENOMEM; 283 ret = -ENOMEM;
284 goto out;
273 } 285 }
274 286
275 buff[0] = '\0'; 287 buff[0] = '\0';
@@ -291,7 +303,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
291 303
292 seq_printf(seq, "%s", buff); 304 seq_printf(seq, "%s", buff);
293 kfree(buff); 305 kfree(buff);
294 return 0; 306out:
307 if (primary_if)
308 hardif_free_ref(primary_if);
309 return ret;
295} 310}
296 311
297static void _hna_local_del(struct hlist_node *node, void *arg) 312static void _hna_local_del(struct hlist_node *node, void *arg)
@@ -468,16 +483,26 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
468 struct bat_priv *bat_priv = netdev_priv(net_dev); 483 struct bat_priv *bat_priv = netdev_priv(net_dev);
469 struct hashtable_t *hash = bat_priv->hna_global_hash; 484 struct hashtable_t *hash = bat_priv->hna_global_hash;
470 struct hna_global_entry *hna_global_entry; 485 struct hna_global_entry *hna_global_entry;
486 struct hard_iface *primary_if;
471 struct hlist_node *node; 487 struct hlist_node *node;
472 struct hlist_head *head; 488 struct hlist_head *head;
473 size_t buf_size, pos; 489 size_t buf_size, pos;
474 char *buff; 490 char *buff;
475 int i; 491 int i, ret = 0;
476 492
477 if (!bat_priv->primary_if) { 493 primary_if = primary_if_get_selected(bat_priv);
478 return seq_printf(seq, "BATMAN mesh %s disabled - " 494 if (!primary_if) {
479 "please specify interfaces to enable it\n", 495 ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
480 net_dev->name); 496 "specify interfaces to enable it\n",
497 net_dev->name);
498 goto out;
499 }
500
501 if (primary_if->if_status != IF_ACTIVE) {
502 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
503 "primary interface not active\n",
504 net_dev->name);
505 goto out;
481 } 506 }
482 507
483 seq_printf(seq, "Globally announced HNAs received via the mesh %s\n", 508 seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
@@ -499,7 +524,8 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
499 buff = kmalloc(buf_size, GFP_ATOMIC); 524 buff = kmalloc(buf_size, GFP_ATOMIC);
500 if (!buff) { 525 if (!buff) {
501 spin_unlock_bh(&bat_priv->hna_ghash_lock); 526 spin_unlock_bh(&bat_priv->hna_ghash_lock);
502 return -ENOMEM; 527 ret = -ENOMEM;
528 goto out;
503 } 529 }
504 buff[0] = '\0'; 530 buff[0] = '\0';
505 pos = 0; 531 pos = 0;
@@ -522,7 +548,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
522 548
523 seq_printf(seq, "%s", buff); 549 seq_printf(seq, "%s", buff);
524 kfree(buff); 550 kfree(buff);
525 return 0; 551out:
552 if (primary_if)
553 hardif_free_ref(primary_if);
554 return ret;
526} 555}
527 556
528static void _hna_global_del_orig(struct bat_priv *bat_priv, 557static void _hna_global_del_orig(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 83445cf0cc9f..947bafc6431a 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -67,7 +67,7 @@ struct hard_iface {
67struct orig_node { 67struct orig_node {
68 uint8_t orig[ETH_ALEN]; 68 uint8_t orig[ETH_ALEN];
69 uint8_t primary_addr[ETH_ALEN]; 69 uint8_t primary_addr[ETH_ALEN];
70 struct neigh_node *router; 70 struct neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 71 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 72 uint8_t *bcast_own_sum;
73 unsigned long last_valid; 73 unsigned long last_valid;
@@ -83,7 +83,7 @@ struct orig_node {
83 uint32_t last_bcast_seqno; 83 uint32_t last_bcast_seqno;
84 struct hlist_head neigh_list; 84 struct hlist_head neigh_list;
85 struct list_head frag_list; 85 struct list_head frag_list;
86 spinlock_t neigh_list_lock; /* protects neighbor list */ 86 spinlock_t neigh_list_lock; /* protects neigh_list and router */
87 atomic_t refcount; 87 atomic_t refcount;
88 struct rcu_head rcu; 88 struct rcu_head rcu;
89 struct hlist_node hash_entry; 89 struct hlist_node hash_entry;
@@ -125,6 +125,7 @@ struct neigh_node {
125 struct rcu_head rcu; 125 struct rcu_head rcu;
126 struct orig_node *orig_node; 126 struct orig_node *orig_node;
127 struct hard_iface *if_incoming; 127 struct hard_iface *if_incoming;
128 spinlock_t tq_lock; /* protects: tq_recv, tq_index */
128}; 129};
129 130
130 131
@@ -146,9 +147,8 @@ struct bat_priv {
146 atomic_t batman_queue_left; 147 atomic_t batman_queue_left;
147 char num_ifaces; 148 char num_ifaces;
148 struct hlist_head softif_neigh_list; 149 struct hlist_head softif_neigh_list;
149 struct softif_neigh *softif_neigh; 150 struct softif_neigh __rcu *softif_neigh;
150 struct debug_log *debug_log; 151 struct debug_log *debug_log;
151 struct hard_iface *primary_if;
152 struct kobject *mesh_obj; 152 struct kobject *mesh_obj;
153 struct dentry *debug_dir; 153 struct dentry *debug_dir;
154 struct hlist_head forw_bat_list; 154 struct hlist_head forw_bat_list;
@@ -173,6 +173,7 @@ struct bat_priv {
173 struct delayed_work orig_work; 173 struct delayed_work orig_work;
174 struct delayed_work vis_work; 174 struct delayed_work vis_work;
175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
176 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
176 struct vis_info *my_vis_info; 177 struct vis_info *my_vis_info;
177}; 178};
178 179
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19f84bd443af..b46cbf1507e4 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -221,15 +221,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
221 struct hard_iface *hard_iface, uint8_t dstaddr[]) 221 struct hard_iface *hard_iface, uint8_t dstaddr[])
222{ 222{
223 struct unicast_packet tmp_uc, *unicast_packet; 223 struct unicast_packet tmp_uc, *unicast_packet;
224 struct hard_iface *primary_if;
224 struct sk_buff *frag_skb; 225 struct sk_buff *frag_skb;
225 struct unicast_frag_packet *frag1, *frag2; 226 struct unicast_frag_packet *frag1, *frag2;
226 int uc_hdr_len = sizeof(struct unicast_packet); 227 int uc_hdr_len = sizeof(struct unicast_packet);
227 int ucf_hdr_len = sizeof(struct unicast_frag_packet); 228 int ucf_hdr_len = sizeof(struct unicast_frag_packet);
228 int data_len = skb->len - uc_hdr_len; 229 int data_len = skb->len - uc_hdr_len;
229 int large_tail = 0; 230 int large_tail = 0, ret = NET_RX_DROP;
230 uint16_t seqno; 231 uint16_t seqno;
231 232
232 if (!bat_priv->primary_if) 233 primary_if = primary_if_get_selected(bat_priv);
234 if (!primary_if)
233 goto dropped; 235 goto dropped;
234 236
235 frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); 237 frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
@@ -254,7 +256,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
254 frag1->version = COMPAT_VERSION; 256 frag1->version = COMPAT_VERSION;
255 frag1->packet_type = BAT_UNICAST_FRAG; 257 frag1->packet_type = BAT_UNICAST_FRAG;
256 258
257 memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 259 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
258 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet)); 260 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
259 261
260 if (data_len & 1) 262 if (data_len & 1)
@@ -269,13 +271,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
269 271
270 send_skb_packet(skb, hard_iface, dstaddr); 272 send_skb_packet(skb, hard_iface, dstaddr);
271 send_skb_packet(frag_skb, hard_iface, dstaddr); 273 send_skb_packet(frag_skb, hard_iface, dstaddr);
272 return NET_RX_SUCCESS; 274 ret = NET_RX_SUCCESS;
275 goto out;
273 276
274drop_frag: 277drop_frag:
275 kfree_skb(frag_skb); 278 kfree_skb(frag_skb);
276dropped: 279dropped:
277 kfree_skb(skb); 280 kfree_skb(skb);
278 return NET_RX_DROP; 281out:
282 if (primary_if)
283 hardif_free_ref(primary_if);
284 return ret;
279} 285}
280 286
281int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) 287int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
@@ -289,7 +295,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
289 295
290 /* get routing information */ 296 /* get routing information */
291 if (is_multicast_ether_addr(ethhdr->h_dest)) { 297 if (is_multicast_ether_addr(ethhdr->h_dest)) {
292 orig_node = (struct orig_node *)gw_get_selected(bat_priv); 298 orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv);
293 if (orig_node) 299 if (orig_node)
294 goto find_router; 300 goto find_router;
295 } 301 }
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f90212f42082..c8f571d3b5d4 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -204,6 +204,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
204 204
205int vis_seq_print_text(struct seq_file *seq, void *offset) 205int vis_seq_print_text(struct seq_file *seq, void *offset)
206{ 206{
207 struct hard_iface *primary_if;
207 struct hlist_node *node; 208 struct hlist_node *node;
208 struct hlist_head *head; 209 struct hlist_head *head;
209 struct vis_info *info; 210 struct vis_info *info;
@@ -215,15 +216,18 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
215 HLIST_HEAD(vis_if_list); 216 HLIST_HEAD(vis_if_list);
216 struct if_list_entry *entry; 217 struct if_list_entry *entry;
217 struct hlist_node *pos, *n; 218 struct hlist_node *pos, *n;
218 int i, j; 219 int i, j, ret = 0;
219 int vis_server = atomic_read(&bat_priv->vis_mode); 220 int vis_server = atomic_read(&bat_priv->vis_mode);
220 size_t buff_pos, buf_size; 221 size_t buff_pos, buf_size;
221 char *buff; 222 char *buff;
222 int compare; 223 int compare;
223 224
224 if ((!bat_priv->primary_if) || 225 primary_if = primary_if_get_selected(bat_priv);
225 (vis_server == VIS_TYPE_CLIENT_UPDATE)) 226 if (!primary_if)
226 return 0; 227 goto out;
228
229 if (vis_server == VIS_TYPE_CLIENT_UPDATE)
230 goto out;
227 231
228 buf_size = 1; 232 buf_size = 1;
229 /* Estimate length */ 233 /* Estimate length */
@@ -270,7 +274,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
270 buff = kmalloc(buf_size, GFP_ATOMIC); 274 buff = kmalloc(buf_size, GFP_ATOMIC);
271 if (!buff) { 275 if (!buff) {
272 spin_unlock_bh(&bat_priv->vis_hash_lock); 276 spin_unlock_bh(&bat_priv->vis_hash_lock);
273 return -ENOMEM; 277 ret = -ENOMEM;
278 goto out;
274 } 279 }
275 buff[0] = '\0'; 280 buff[0] = '\0';
276 buff_pos = 0; 281 buff_pos = 0;
@@ -328,7 +333,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
328 seq_printf(seq, "%s", buff); 333 seq_printf(seq, "%s", buff);
329 kfree(buff); 334 kfree(buff);
330 335
331 return 0; 336out:
337 if (primary_if)
338 hardif_free_ref(primary_if);
339 return ret;
332} 340}
333 341
334/* add the info packet to the send list, if it was not 342/* add the info packet to the send list, if it was not
@@ -558,6 +566,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
558 struct vis_info *info) 566 struct vis_info *info)
559{ 567{
560 struct hashtable_t *hash = bat_priv->orig_hash; 568 struct hashtable_t *hash = bat_priv->orig_hash;
569 struct neigh_node *router;
561 struct hlist_node *node; 570 struct hlist_node *node;
562 struct hlist_head *head; 571 struct hlist_head *head;
563 struct orig_node *orig_node; 572 struct orig_node *orig_node;
@@ -571,13 +580,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
571 580
572 rcu_read_lock(); 581 rcu_read_lock();
573 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 582 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
574 if ((orig_node) && (orig_node->router) && 583 router = orig_node_get_router(orig_node);
575 (orig_node->flags & VIS_SERVER) && 584 if (!router)
576 (orig_node->router->tq_avg > best_tq)) { 585 continue;
577 best_tq = orig_node->router->tq_avg; 586
587 if ((orig_node->flags & VIS_SERVER) &&
588 (router->tq_avg > best_tq)) {
589 best_tq = router->tq_avg;
578 memcpy(packet->target_orig, orig_node->orig, 590 memcpy(packet->target_orig, orig_node->orig,
579 ETH_ALEN); 591 ETH_ALEN);
580 } 592 }
593 neigh_node_free_ref(router);
581 } 594 }
582 rcu_read_unlock(); 595 rcu_read_unlock();
583 } 596 }
@@ -605,7 +618,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
605 struct hlist_node *node; 618 struct hlist_node *node;
606 struct hlist_head *head; 619 struct hlist_head *head;
607 struct orig_node *orig_node; 620 struct orig_node *orig_node;
608 struct neigh_node *neigh_node; 621 struct neigh_node *router;
609 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 622 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
610 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 623 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
611 struct vis_info_entry *entry; 624 struct vis_info_entry *entry;
@@ -633,30 +646,32 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
633 646
634 rcu_read_lock(); 647 rcu_read_lock();
635 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 648 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
636 neigh_node = orig_node->router; 649 router = orig_node_get_router(orig_node);
637 650 if (!router)
638 if (!neigh_node)
639 continue; 651 continue;
640 652
641 if (!compare_eth(neigh_node->addr, orig_node->orig)) 653 if (!compare_eth(router->addr, orig_node->orig))
642 continue; 654 goto next;
643 655
644 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 656 if (router->if_incoming->if_status != IF_ACTIVE)
645 continue; 657 goto next;
646 658
647 if (neigh_node->tq_avg < 1) 659 if (router->tq_avg < 1)
648 continue; 660 goto next;
649 661
650 /* fill one entry into buffer. */ 662 /* fill one entry into buffer. */
651 entry = (struct vis_info_entry *) 663 entry = (struct vis_info_entry *)
652 skb_put(info->skb_packet, sizeof(*entry)); 664 skb_put(info->skb_packet, sizeof(*entry));
653 memcpy(entry->src, 665 memcpy(entry->src,
654 neigh_node->if_incoming->net_dev->dev_addr, 666 router->if_incoming->net_dev->dev_addr,
655 ETH_ALEN); 667 ETH_ALEN);
656 memcpy(entry->dest, orig_node->orig, ETH_ALEN); 668 memcpy(entry->dest, orig_node->orig, ETH_ALEN);
657 entry->quality = neigh_node->tq_avg; 669 entry->quality = router->tq_avg;
658 packet->entries++; 670 packet->entries++;
659 671
672next:
673 neigh_node_free_ref(router);
674
660 if (vis_packet_full(info)) 675 if (vis_packet_full(info))
661 goto unlock; 676 goto unlock;
662 } 677 }
@@ -725,6 +740,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
725static void broadcast_vis_packet(struct bat_priv *bat_priv, 740static void broadcast_vis_packet(struct bat_priv *bat_priv,
726 struct vis_info *info) 741 struct vis_info *info)
727{ 742{
743 struct neigh_node *router;
728 struct hashtable_t *hash = bat_priv->orig_hash; 744 struct hashtable_t *hash = bat_priv->orig_hash;
729 struct hlist_node *node; 745 struct hlist_node *node;
730 struct hlist_head *head; 746 struct hlist_head *head;
@@ -745,19 +761,26 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
745 rcu_read_lock(); 761 rcu_read_lock();
746 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 762 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
747 /* if it's a vis server and reachable, send it. */ 763 /* if it's a vis server and reachable, send it. */
748 if ((!orig_node) || (!orig_node->router))
749 continue;
750 if (!(orig_node->flags & VIS_SERVER)) 764 if (!(orig_node->flags & VIS_SERVER))
751 continue; 765 continue;
766
767 router = orig_node_get_router(orig_node);
768 if (!router)
769 continue;
770
752 /* don't send it if we already received the packet from 771 /* don't send it if we already received the packet from
753 * this node. */ 772 * this node. */
754 if (recv_list_is_in(bat_priv, &info->recv_list, 773 if (recv_list_is_in(bat_priv, &info->recv_list,
755 orig_node->orig)) 774 orig_node->orig)) {
775 neigh_node_free_ref(router);
756 continue; 776 continue;
777 }
757 778
758 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); 779 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
759 hard_iface = orig_node->router->if_incoming; 780 hard_iface = router->if_incoming;
760 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 781 memcpy(dstaddr, router->addr, ETH_ALEN);
782
783 neigh_node_free_ref(router);
761 784
762 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 785 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
763 if (skb) 786 if (skb)
@@ -772,60 +795,48 @@ static void unicast_vis_packet(struct bat_priv *bat_priv,
772 struct vis_info *info) 795 struct vis_info *info)
773{ 796{
774 struct orig_node *orig_node; 797 struct orig_node *orig_node;
775 struct neigh_node *neigh_node = NULL; 798 struct neigh_node *router = NULL;
776 struct sk_buff *skb; 799 struct sk_buff *skb;
777 struct vis_packet *packet; 800 struct vis_packet *packet;
778 801
779 packet = (struct vis_packet *)info->skb_packet->data; 802 packet = (struct vis_packet *)info->skb_packet->data;
780 803
781 rcu_read_lock();
782 orig_node = orig_hash_find(bat_priv, packet->target_orig); 804 orig_node = orig_hash_find(bat_priv, packet->target_orig);
783
784 if (!orig_node) 805 if (!orig_node)
785 goto unlock; 806 goto out;
786
787 neigh_node = orig_node->router;
788 807
789 if (!neigh_node) 808 router = orig_node_get_router(orig_node);
790 goto unlock; 809 if (!router)
791 810 goto out;
792 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
793 neigh_node = NULL;
794 goto unlock;
795 }
796
797 rcu_read_unlock();
798 811
799 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 812 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
800 if (skb) 813 if (skb)
801 send_skb_packet(skb, neigh_node->if_incoming, 814 send_skb_packet(skb, router->if_incoming, router->addr);
802 neigh_node->addr);
803 815
804 goto out;
805
806unlock:
807 rcu_read_unlock();
808out: 816out:
809 if (neigh_node) 817 if (router)
810 neigh_node_free_ref(neigh_node); 818 neigh_node_free_ref(router);
811 if (orig_node) 819 if (orig_node)
812 orig_node_free_ref(orig_node); 820 orig_node_free_ref(orig_node);
813 return;
814} 821}
815 822
816/* only send one vis packet. called from send_vis_packets() */ 823/* only send one vis packet. called from send_vis_packets() */
817static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) 824static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
818{ 825{
826 struct hard_iface *primary_if;
819 struct vis_packet *packet; 827 struct vis_packet *packet;
820 828
829 primary_if = primary_if_get_selected(bat_priv);
830 if (!primary_if)
831 goto out;
832
821 packet = (struct vis_packet *)info->skb_packet->data; 833 packet = (struct vis_packet *)info->skb_packet->data;
822 if (packet->ttl < 2) { 834 if (packet->ttl < 2) {
823 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 835 pr_debug("Error - can't send vis packet: ttl exceeded\n");
824 return; 836 goto out;
825 } 837 }
826 838
827 memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr, 839 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
828 ETH_ALEN);
829 packet->ttl--; 840 packet->ttl--;
830 841
831 if (is_broadcast_ether_addr(packet->target_orig)) 842 if (is_broadcast_ether_addr(packet->target_orig))
@@ -833,6 +844,10 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
833 else 844 else
834 unicast_vis_packet(bat_priv, info); 845 unicast_vis_packet(bat_priv, info);
835 packet->ttl++; /* restore TTL */ 846 packet->ttl++; /* restore TTL */
847
848out:
849 if (primary_if)
850 hardif_free_ref(primary_if);
836} 851}
837 852
838/* called from timer; send (and maybe generate) vis packet. */ 853/* called from timer; send (and maybe generate) vis packet. */
@@ -859,8 +874,7 @@ static void send_vis_packets(struct work_struct *work)
859 kref_get(&info->refcount); 874 kref_get(&info->refcount);
860 spin_unlock_bh(&bat_priv->vis_hash_lock); 875 spin_unlock_bh(&bat_priv->vis_hash_lock);
861 876
862 if (bat_priv->primary_if) 877 send_vis_packet(bat_priv, info);
863 send_vis_packet(bat_priv, info);
864 878
865 spin_lock_bh(&bat_priv->vis_hash_lock); 879 spin_lock_bh(&bat_priv->vis_hash_lock);
866 send_list_del(info); 880 send_list_del(info);
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 70672544db86..8e6c06158f8e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -23,88 +23,88 @@
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <net/bluetooth/bluetooth.h> 24#include <net/bluetooth/bluetooth.h>
25 25
26// Limits 26/* Limits */
27#define BNEP_MAX_PROTO_FILTERS 5 27#define BNEP_MAX_PROTO_FILTERS 5
28#define BNEP_MAX_MULTICAST_FILTERS 20 28#define BNEP_MAX_MULTICAST_FILTERS 20
29 29
30// UUIDs 30/* UUIDs */
31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB 31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB
32#define BNEP_UUID16 0x02 32#define BNEP_UUID16 0x02
33#define BNEP_UUID32 0x04 33#define BNEP_UUID32 0x04
34#define BNEP_UUID128 0x16 34#define BNEP_UUID128 0x16
35 35
36#define BNEP_SVC_PANU 0x1115 36#define BNEP_SVC_PANU 0x1115
37#define BNEP_SVC_NAP 0x1116 37#define BNEP_SVC_NAP 0x1116
38#define BNEP_SVC_GN 0x1117 38#define BNEP_SVC_GN 0x1117
39 39
40// Packet types 40/* Packet types */
41#define BNEP_GENERAL 0x00 41#define BNEP_GENERAL 0x00
42#define BNEP_CONTROL 0x01 42#define BNEP_CONTROL 0x01
43#define BNEP_COMPRESSED 0x02 43#define BNEP_COMPRESSED 0x02
44#define BNEP_COMPRESSED_SRC_ONLY 0x03 44#define BNEP_COMPRESSED_SRC_ONLY 0x03
45#define BNEP_COMPRESSED_DST_ONLY 0x04 45#define BNEP_COMPRESSED_DST_ONLY 0x04
46 46
47// Control types 47/* Control types */
48#define BNEP_CMD_NOT_UNDERSTOOD 0x00 48#define BNEP_CMD_NOT_UNDERSTOOD 0x00
49#define BNEP_SETUP_CONN_REQ 0x01 49#define BNEP_SETUP_CONN_REQ 0x01
50#define BNEP_SETUP_CONN_RSP 0x02 50#define BNEP_SETUP_CONN_RSP 0x02
51#define BNEP_FILTER_NET_TYPE_SET 0x03 51#define BNEP_FILTER_NET_TYPE_SET 0x03
52#define BNEP_FILTER_NET_TYPE_RSP 0x04 52#define BNEP_FILTER_NET_TYPE_RSP 0x04
53#define BNEP_FILTER_MULTI_ADDR_SET 0x05 53#define BNEP_FILTER_MULTI_ADDR_SET 0x05
54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06 54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06
55 55
56// Extension types 56/* Extension types */
57#define BNEP_EXT_CONTROL 0x00 57#define BNEP_EXT_CONTROL 0x00
58 58
59// Response messages 59/* Response messages */
60#define BNEP_SUCCESS 0x00 60#define BNEP_SUCCESS 0x00
61 61
62#define BNEP_CONN_INVALID_DST 0x01 62#define BNEP_CONN_INVALID_DST 0x01
63#define BNEP_CONN_INVALID_SRC 0x02 63#define BNEP_CONN_INVALID_SRC 0x02
64#define BNEP_CONN_INVALID_SVC 0x03 64#define BNEP_CONN_INVALID_SVC 0x03
65#define BNEP_CONN_NOT_ALLOWED 0x04 65#define BNEP_CONN_NOT_ALLOWED 0x04
66 66
67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01 67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01
68#define BNEP_FILTER_INVALID_RANGE 0x02 68#define BNEP_FILTER_INVALID_RANGE 0x02
69#define BNEP_FILTER_INVALID_MCADDR 0x02 69#define BNEP_FILTER_INVALID_MCADDR 0x02
70#define BNEP_FILTER_LIMIT_REACHED 0x03 70#define BNEP_FILTER_LIMIT_REACHED 0x03
71#define BNEP_FILTER_DENIED_SECURITY 0x04 71#define BNEP_FILTER_DENIED_SECURITY 0x04
72 72
73// L2CAP settings 73/* L2CAP settings */
74#define BNEP_MTU 1691 74#define BNEP_MTU 1691
75#define BNEP_PSM 0x0f 75#define BNEP_PSM 0x0f
76#define BNEP_FLUSH_TO 0xffff 76#define BNEP_FLUSH_TO 0xffff
77#define BNEP_CONNECT_TO 15 77#define BNEP_CONNECT_TO 15
78#define BNEP_FILTER_TO 15 78#define BNEP_FILTER_TO 15
79 79
80// Headers 80/* Headers */
81#define BNEP_TYPE_MASK 0x7f 81#define BNEP_TYPE_MASK 0x7f
82#define BNEP_EXT_HEADER 0x80 82#define BNEP_EXT_HEADER 0x80
83 83
84struct bnep_setup_conn_req { 84struct bnep_setup_conn_req {
85 __u8 type; 85 __u8 type;
86 __u8 ctrl; 86 __u8 ctrl;
87 __u8 uuid_size; 87 __u8 uuid_size;
88 __u8 service[0]; 88 __u8 service[0];
89} __packed; 89} __packed;
90 90
91struct bnep_set_filter_req { 91struct bnep_set_filter_req {
92 __u8 type; 92 __u8 type;
93 __u8 ctrl; 93 __u8 ctrl;
94 __be16 len; 94 __be16 len;
95 __u8 list[0]; 95 __u8 list[0];
96} __packed; 96} __packed;
97 97
98struct bnep_control_rsp { 98struct bnep_control_rsp {
99 __u8 type; 99 __u8 type;
100 __u8 ctrl; 100 __u8 ctrl;
101 __be16 resp; 101 __be16 resp;
102} __packed; 102} __packed;
103 103
104struct bnep_ext_hdr { 104struct bnep_ext_hdr {
105 __u8 type; 105 __u8 type;
106 __u8 len; 106 __u8 len;
107 __u8 data[0]; 107 __u8 data[0];
108} __packed; 108} __packed;
109 109
110/* BNEP ioctl defines */ 110/* BNEP ioctl defines */
@@ -114,10 +114,10 @@ struct bnep_ext_hdr {
114#define BNEPGETCONNINFO _IOR('B', 211, int) 114#define BNEPGETCONNINFO _IOR('B', 211, int)
115 115
116struct bnep_connadd_req { 116struct bnep_connadd_req {
117 int sock; // Connected socket 117 int sock; /* Connected socket */
118 __u32 flags; 118 __u32 flags;
119 __u16 role; 119 __u16 role;
120 char device[16]; // Name of the Ethernet device 120 char device[16]; /* Name of the Ethernet device */
121}; 121};
122 122
123struct bnep_conndel_req { 123struct bnep_conndel_req {
@@ -148,14 +148,14 @@ int bnep_del_connection(struct bnep_conndel_req *req);
148int bnep_get_connlist(struct bnep_connlist_req *req); 148int bnep_get_connlist(struct bnep_connlist_req *req);
149int bnep_get_conninfo(struct bnep_conninfo *ci); 149int bnep_get_conninfo(struct bnep_conninfo *ci);
150 150
151// BNEP sessions 151/* BNEP sessions */
152struct bnep_session { 152struct bnep_session {
153 struct list_head list; 153 struct list_head list;
154 154
155 unsigned int role; 155 unsigned int role;
156 unsigned long state; 156 unsigned long state;
157 unsigned long flags; 157 unsigned long flags;
158 atomic_t killed; 158 struct task_struct *task;
159 159
160 struct ethhdr eh; 160 struct ethhdr eh;
161 struct msghdr msg; 161 struct msghdr msg;
@@ -173,7 +173,7 @@ void bnep_sock_cleanup(void);
173 173
174static inline int bnep_mc_hash(__u8 *addr) 174static inline int bnep_mc_hash(__u8 *addr)
175{ 175{
176 return (crc32_be(~0, addr, ETH_ALEN) >> 26); 176 return crc32_be(~0, addr, ETH_ALEN) >> 26;
177} 177}
178 178
179#endif 179#endif
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 03d4d1245d58..ca39fcf010ce 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/kthread.h>
39#include <net/sock.h> 40#include <net/sock.h>
40 41
41#include <linux/socket.h> 42#include <linux/socket.h>
@@ -131,7 +132,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
131 return -EILSEQ; 132 return -EILSEQ;
132 133
133 n = get_unaligned_be16(data); 134 n = get_unaligned_be16(data);
134 data++; len -= 2; 135 data++;
136 len -= 2;
135 137
136 if (len < n) 138 if (len < n)
137 return -EILSEQ; 139 return -EILSEQ;
@@ -176,7 +178,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
176 return -EILSEQ; 178 return -EILSEQ;
177 179
178 n = get_unaligned_be16(data); 180 n = get_unaligned_be16(data);
179 data += 2; len -= 2; 181 data += 2;
182 len -= 2;
180 183
181 if (len < n) 184 if (len < n)
182 return -EILSEQ; 185 return -EILSEQ;
@@ -187,6 +190,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
187 n /= (ETH_ALEN * 2); 190 n /= (ETH_ALEN * 2);
188 191
189 if (n > 0) { 192 if (n > 0) {
193 int i;
194
190 s->mc_filter = 0; 195 s->mc_filter = 0;
191 196
192 /* Always send broadcast */ 197 /* Always send broadcast */
@@ -196,18 +201,22 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
196 for (; n > 0; n--) { 201 for (; n > 0; n--) {
197 u8 a1[6], *a2; 202 u8 a1[6], *a2;
198 203
199 memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; 204 memcpy(a1, data, ETH_ALEN);
200 a2 = data; data += ETH_ALEN; 205 data += ETH_ALEN;
206 a2 = data;
207 data += ETH_ALEN;
201 208
202 BT_DBG("mc filter %s -> %s", 209 BT_DBG("mc filter %s -> %s",
203 batostr((void *) a1), batostr((void *) a2)); 210 batostr((void *) a1), batostr((void *) a2));
204 211
205 #define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); }
206
207 /* Iterate from a1 to a2 */ 212 /* Iterate from a1 to a2 */
208 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 213 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
209 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { 214 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) {
210 INCA(a1); 215 /* Increment a1 */
216 i = 5;
217 while (i >= 0 && ++a1[i--] == 0)
218 ;
219
211 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 220 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
212 } 221 }
213 } 222 }
@@ -227,7 +236,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
227 u8 cmd = *(u8 *)data; 236 u8 cmd = *(u8 *)data;
228 int err = 0; 237 int err = 0;
229 238
230 data++; len--; 239 data++;
240 len--;
231 241
232 switch (cmd) { 242 switch (cmd) {
233 case BNEP_CMD_NOT_UNDERSTOOD: 243 case BNEP_CMD_NOT_UNDERSTOOD:
@@ -302,7 +312,6 @@ static u8 __bnep_rx_hlen[] = {
302 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ 312 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */
303 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ 313 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
304}; 314};
305#define BNEP_RX_TYPES (sizeof(__bnep_rx_hlen) - 1)
306 315
307static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) 316static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
308{ 317{
@@ -312,9 +321,10 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
312 321
313 dev->stats.rx_bytes += skb->len; 322 dev->stats.rx_bytes += skb->len;
314 323
315 type = *(u8 *) skb->data; skb_pull(skb, 1); 324 type = *(u8 *) skb->data;
325 skb_pull(skb, 1);
316 326
317 if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES) 327 if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
318 goto badframe; 328 goto badframe;
319 329
320 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { 330 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
@@ -367,14 +377,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
367 377
368 case BNEP_COMPRESSED_DST_ONLY: 378 case BNEP_COMPRESSED_DST_ONLY:
369 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), 379 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
370 ETH_ALEN); 380 ETH_ALEN);
371 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, 381 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
372 ETH_ALEN + 2); 382 ETH_ALEN + 2);
373 break; 383 break;
374 384
375 case BNEP_GENERAL: 385 case BNEP_GENERAL:
376 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb), 386 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
377 ETH_ALEN * 2); 387 ETH_ALEN * 2);
378 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); 388 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
379 break; 389 break;
380 } 390 }
@@ -470,15 +480,14 @@ static int bnep_session(void *arg)
470 480
471 BT_DBG(""); 481 BT_DBG("");
472 482
473 daemonize("kbnepd %s", dev->name);
474 set_user_nice(current, -15); 483 set_user_nice(current, -15);
475 484
476 init_waitqueue_entry(&wait, current); 485 init_waitqueue_entry(&wait, current);
477 add_wait_queue(sk_sleep(sk), &wait); 486 add_wait_queue(sk_sleep(sk), &wait);
478 while (!atomic_read(&s->killed)) { 487 while (!kthread_should_stop()) {
479 set_current_state(TASK_INTERRUPTIBLE); 488 set_current_state(TASK_INTERRUPTIBLE);
480 489
481 // RX 490 /* RX */
482 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 491 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
483 skb_orphan(skb); 492 skb_orphan(skb);
484 bnep_rx_frame(s, skb); 493 bnep_rx_frame(s, skb);
@@ -487,7 +496,7 @@ static int bnep_session(void *arg)
487 if (sk->sk_state != BT_CONNECTED) 496 if (sk->sk_state != BT_CONNECTED)
488 break; 497 break;
489 498
490 // TX 499 /* TX */
491 while ((skb = skb_dequeue(&sk->sk_write_queue))) 500 while ((skb = skb_dequeue(&sk->sk_write_queue)))
492 if (bnep_tx_frame(s, skb)) 501 if (bnep_tx_frame(s, skb))
493 break; 502 break;
@@ -555,8 +564,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
555 564
556 /* session struct allocated as private part of net_device */ 565 /* session struct allocated as private part of net_device */
557 dev = alloc_netdev(sizeof(struct bnep_session), 566 dev = alloc_netdev(sizeof(struct bnep_session),
558 (*req->device) ? req->device : "bnep%d", 567 (*req->device) ? req->device : "bnep%d",
559 bnep_net_setup); 568 bnep_net_setup);
560 if (!dev) 569 if (!dev)
561 return -ENOMEM; 570 return -ENOMEM;
562 571
@@ -571,7 +580,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
571 s = netdev_priv(dev); 580 s = netdev_priv(dev);
572 581
573 /* This is rx header therefore addresses are swapped. 582 /* This is rx header therefore addresses are swapped.
574 * ie eh.h_dest is our local address. */ 583 * ie. eh.h_dest is our local address. */
575 memcpy(s->eh.h_dest, &src, ETH_ALEN); 584 memcpy(s->eh.h_dest, &src, ETH_ALEN);
576 memcpy(s->eh.h_source, &dst, ETH_ALEN); 585 memcpy(s->eh.h_source, &dst, ETH_ALEN);
577 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); 586 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
@@ -597,17 +606,17 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
597 SET_NETDEV_DEVTYPE(dev, &bnep_type); 606 SET_NETDEV_DEVTYPE(dev, &bnep_type);
598 607
599 err = register_netdev(dev); 608 err = register_netdev(dev);
600 if (err) { 609 if (err)
601 goto failed; 610 goto failed;
602 }
603 611
604 __bnep_link_session(s); 612 __bnep_link_session(s);
605 613
606 err = kernel_thread(bnep_session, s, CLONE_KERNEL); 614 s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
607 if (err < 0) { 615 if (IS_ERR(s->task)) {
608 /* Session thread start failed, gotta cleanup. */ 616 /* Session thread start failed, gotta cleanup. */
609 unregister_netdev(dev); 617 unregister_netdev(dev);
610 __bnep_unlink_session(s); 618 __bnep_unlink_session(s);
619 err = PTR_ERR(s->task);
611 goto failed; 620 goto failed;
612 } 621 }
613 622
@@ -631,15 +640,9 @@ int bnep_del_connection(struct bnep_conndel_req *req)
631 down_read(&bnep_session_sem); 640 down_read(&bnep_session_sem);
632 641
633 s = __bnep_get_session(req->dst); 642 s = __bnep_get_session(req->dst);
634 if (s) { 643 if (s)
635 /* Wakeup user-space which is polling for socket errors. 644 kthread_stop(s->task);
636 * This is temporary hack until we have shutdown in L2CAP */ 645 else
637 s->sock->sk->sk_err = EUNATCH;
638
639 /* Kill session thread */
640 atomic_inc(&s->killed);
641 wake_up_interruptible(sk_sleep(s->sock->sk));
642 } else
643 err = -ENOENT; 646 err = -ENOENT;
644 647
645 up_read(&bnep_session_sem); 648 up_read(&bnep_session_sem);
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index d935da71ab3b..17800b1d28ea 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -39,10 +39,10 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h> 41#include <linux/gfp.h>
42#include <linux/uaccess.h>
42#include <net/sock.h> 43#include <net/sock.h>
43 44
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/uaccess.h>
46 46
47#include "bnep.h" 47#include "bnep.h"
48 48
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 67cff810c77d..744233cba244 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/wait.h> 37#include <linux/wait.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -143,7 +144,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
143 144
144 skb_queue_tail(&session->transmit, skb); 145 skb_queue_tail(&session->transmit, skb);
145 146
146 cmtp_schedule(session); 147 wake_up_interruptible(sk_sleep(session->sock->sk));
147} 148}
148 149
149static void cmtp_send_interopmsg(struct cmtp_session *session, 150static void cmtp_send_interopmsg(struct cmtp_session *session,
@@ -386,8 +387,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
386 387
387 capi_ctr_down(ctrl); 388 capi_ctr_down(ctrl);
388 389
389 atomic_inc(&session->terminate); 390 kthread_stop(session->task);
390 cmtp_schedule(session);
391} 391}
392 392
393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) 393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 785e79e953c5..db43b54ac9af 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -37,7 +37,7 @@
37#define CMTP_LOOPBACK 0 37#define CMTP_LOOPBACK 0
38 38
39struct cmtp_connadd_req { 39struct cmtp_connadd_req {
40 int sock; // Connected socket 40 int sock; /* Connected socket */
41 __u32 flags; 41 __u32 flags;
42}; 42};
43 43
@@ -81,7 +81,7 @@ struct cmtp_session {
81 81
82 char name[BTNAMSIZ]; 82 char name[BTNAMSIZ];
83 83
84 atomic_t terminate; 84 struct task_struct *task;
85 85
86 wait_queue_head_t wait; 86 wait_queue_head_t wait;
87 87
@@ -121,13 +121,6 @@ void cmtp_detach_device(struct cmtp_session *session);
121 121
122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb); 122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);
123 123
124static inline void cmtp_schedule(struct cmtp_session *session)
125{
126 struct sock *sk = session->sock->sk;
127
128 wake_up_interruptible(sk_sleep(sk));
129}
130
131/* CMTP init defines */ 124/* CMTP init defines */
132int cmtp_init_sockets(void); 125int cmtp_init_sockets(void);
133void cmtp_cleanup_sockets(void); 126void cmtp_cleanup_sockets(void);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 964ea9126f9f..c5b11af908be 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -235,9 +236,12 @@ static void cmtp_process_transmit(struct cmtp_session *session)
235 236
236 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); 237 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);
237 238
238 if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) { 239 if (scb->id < 0) {
239 skb_queue_head(&session->transmit, skb); 240 scb->id = cmtp_alloc_block_id(session);
240 break; 241 if (scb->id < 0) {
242 skb_queue_head(&session->transmit, skb);
243 break;
244 }
241 } 245 }
242 246
243 if (size < 256) { 247 if (size < 256) {
@@ -284,12 +288,11 @@ static int cmtp_session(void *arg)
284 288
285 BT_DBG("session %p", session); 289 BT_DBG("session %p", session);
286 290
287 daemonize("kcmtpd_ctr_%d", session->num);
288 set_user_nice(current, -15); 291 set_user_nice(current, -15);
289 292
290 init_waitqueue_entry(&wait, current); 293 init_waitqueue_entry(&wait, current);
291 add_wait_queue(sk_sleep(sk), &wait); 294 add_wait_queue(sk_sleep(sk), &wait);
292 while (!atomic_read(&session->terminate)) { 295 while (!kthread_should_stop()) {
293 set_current_state(TASK_INTERRUPTIBLE); 296 set_current_state(TASK_INTERRUPTIBLE);
294 297
295 if (sk->sk_state != BT_CONNECTED) 298 if (sk->sk_state != BT_CONNECTED)
@@ -343,7 +346,8 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
343 346
344 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); 347 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
345 348
346 session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu); 349 session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
350 l2cap_pi(sock->sk)->chan->imtu);
347 351
348 BT_DBG("mtu %d", session->mtu); 352 BT_DBG("mtu %d", session->mtu);
349 353
@@ -367,9 +371,12 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
367 371
368 __cmtp_link_session(session); 372 __cmtp_link_session(session);
369 373
370 err = kernel_thread(cmtp_session, session, CLONE_KERNEL); 374 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
371 if (err < 0) 375 session->num);
376 if (IS_ERR(session->task)) {
377 err = PTR_ERR(session->task);
372 goto unlink; 378 goto unlink;
379 }
373 380
374 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 381 if (!(session->flags & (1 << CMTP_LOOPBACK))) {
375 err = cmtp_attach_device(session); 382 err = cmtp_attach_device(session);
@@ -406,9 +413,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
406 /* Flush the transmit queue */ 413 /* Flush the transmit queue */
407 skb_queue_purge(&session->transmit); 414 skb_queue_purge(&session->transmit);
408 415
409 /* Kill session thread */ 416 /* Stop session thread */
410 atomic_inc(&session->terminate); 417 kthread_stop(session->task);
411 cmtp_schedule(session);
412 } else 418 } else
413 err = -ENOENT; 419 err = -ENOENT;
414 420
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 7ea1979a8e4f..3f2dd5c25ae5 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -34,12 +34,12 @@
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h> 36#include <linux/gfp.h>
37#include <linux/uaccess.h>
37#include <net/sock.h> 38#include <net/sock.h>
38 39
39#include <linux/isdn/capilli.h> 40#include <linux/isdn/capilli.h>
40 41
41#include <asm/system.h> 42#include <asm/system.h>
42#include <asm/uaccess.h>
43 43
44#include "cmtp.h" 44#include "cmtp.h"
45 45
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f56b2f49d..7f5ad8a2b22d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -269,6 +269,19 @@ static void hci_conn_idle(unsigned long arg)
269 hci_conn_enter_sniff_mode(conn); 269 hci_conn_enter_sniff_mode(conn);
270} 270}
271 271
272static void hci_conn_auto_accept(unsigned long arg)
273{
274 struct hci_conn *conn = (void *) arg;
275 struct hci_dev *hdev = conn->hdev;
276
277 hci_dev_lock(hdev);
278
279 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
280 &conn->dst);
281
282 hci_dev_unlock(hdev);
283}
284
272struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 285struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
273{ 286{
274 struct hci_conn *conn; 287 struct hci_conn *conn;
@@ -287,6 +300,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
287 conn->auth_type = HCI_AT_GENERAL_BONDING; 300 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability; 301 conn->io_capability = hdev->io_capability;
289 conn->remote_auth = 0xff; 302 conn->remote_auth = 0xff;
303 conn->key_type = 0xff;
290 304
291 conn->power_save = 1; 305 conn->power_save = 1;
292 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 306 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -311,6 +325,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
311 325
312 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); 326 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
313 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 327 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
328 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
329 (unsigned long) conn);
314 330
315 atomic_set(&conn->refcnt, 0); 331 atomic_set(&conn->refcnt, 0);
316 332
@@ -341,6 +357,8 @@ int hci_conn_del(struct hci_conn *conn)
341 357
342 del_timer(&conn->disc_timer); 358 del_timer(&conn->disc_timer);
343 359
360 del_timer(&conn->auto_accept_timer);
361
344 if (conn->type == ACL_LINK) { 362 if (conn->type == ACL_LINK) {
345 struct hci_conn *sco = conn->link; 363 struct hci_conn *sco = conn->link;
346 if (sco) 364 if (sco)
@@ -535,32 +553,72 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
535 return 0; 553 return 0;
536} 554}
537 555
556/* Encrypt the the link */
557static void hci_conn_encrypt(struct hci_conn *conn)
558{
559 BT_DBG("conn %p", conn);
560
561 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
562 struct hci_cp_set_conn_encrypt cp;
563 cp.handle = cpu_to_le16(conn->handle);
564 cp.encrypt = 0x01;
565 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
566 &cp);
567 }
568}
569
538/* Enable security */ 570/* Enable security */
539int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 571int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
540{ 572{
541 BT_DBG("conn %p", conn); 573 BT_DBG("conn %p", conn);
542 574
575 /* For sdp we don't need the link key. */
543 if (sec_level == BT_SECURITY_SDP) 576 if (sec_level == BT_SECURITY_SDP)
544 return 1; 577 return 1;
545 578
579 /* For non 2.1 devices and low security level we don't need the link
580 key. */
546 if (sec_level == BT_SECURITY_LOW && 581 if (sec_level == BT_SECURITY_LOW &&
547 (!conn->ssp_mode || !conn->hdev->ssp_mode)) 582 (!conn->ssp_mode || !conn->hdev->ssp_mode))
548 return 1; 583 return 1;
549 584
550 if (conn->link_mode & HCI_LM_ENCRYPT) 585 /* For other security levels we need the link key. */
551 return hci_conn_auth(conn, sec_level, auth_type); 586 if (!(conn->link_mode & HCI_LM_AUTH))
552 587 goto auth;
588
589 /* An authenticated combination key has sufficient security for any
590 security level. */
591 if (conn->key_type == HCI_LK_AUTH_COMBINATION)
592 goto encrypt;
593
594 /* An unauthenticated combination key has sufficient security for
595 security level 1 and 2. */
596 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
597 (sec_level == BT_SECURITY_MEDIUM ||
598 sec_level == BT_SECURITY_LOW))
599 goto encrypt;
600
601 /* A combination key has always sufficient security for the security
602 levels 1 or 2. High security level requires the combination key
603 is generated using maximum PIN code length (16).
604 For pre 2.1 units. */
605 if (conn->key_type == HCI_LK_COMBINATION &&
606 (sec_level != BT_SECURITY_HIGH ||
607 conn->pin_length == 16))
608 goto encrypt;
609
610auth:
553 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 611 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
554 return 0; 612 return 0;
555 613
556 if (hci_conn_auth(conn, sec_level, auth_type)) { 614 hci_conn_auth(conn, sec_level, auth_type);
557 struct hci_cp_set_conn_encrypt cp; 615 return 0;
558 cp.handle = cpu_to_le16(conn->handle); 616
559 cp.encrypt = 1; 617encrypt:
560 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, 618 if (conn->link_mode & HCI_LM_ENCRYPT)
561 sizeof(cp), &cp); 619 return 1;
562 }
563 620
621 hci_conn_encrypt(conn);
564 return 0; 622 return 0;
565} 623}
566EXPORT_SYMBOL(hci_conn_security); 624EXPORT_SYMBOL(hci_conn_security);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b5a8afc2be33..815269b07f20 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -56,7 +56,6 @@
56static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
57static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
58static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
59static void hci_notify(struct hci_dev *hdev, int event);
60 59
61static DEFINE_RWLOCK(hci_task_lock); 60static DEFINE_RWLOCK(hci_task_lock);
62 61
@@ -1021,18 +1020,54 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1021 return NULL; 1020 return NULL;
1022} 1021}
1023 1022
1024int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, 1023static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1025 u8 *val, u8 type, u8 pin_len) 1024 u8 key_type, u8 old_key_type)
1025{
1026 /* Legacy key */
1027 if (key_type < 0x03)
1028 return 1;
1029
1030 /* Debug keys are insecure so don't store them persistently */
1031 if (key_type == HCI_LK_DEBUG_COMBINATION)
1032 return 0;
1033
1034 /* Changed combination key and there's no previous one */
1035 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1036 return 0;
1037
1038 /* Security mode 3 case */
1039 if (!conn)
1040 return 1;
1041
1042 /* Neither local nor remote side had no-bonding as requirement */
1043 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1044 return 1;
1045
1046 /* Local side had dedicated bonding as requirement */
1047 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1048 return 1;
1049
1050 /* Remote side had dedicated bonding as requirement */
1051 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1052 return 1;
1053
1054 /* If none of the above criteria match, then don't store the key
1055 * persistently */
1056 return 0;
1057}
1058
1059int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1060 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1026{ 1061{
1027 struct link_key *key, *old_key; 1062 struct link_key *key, *old_key;
1028 u8 old_key_type; 1063 u8 old_key_type, persistent;
1029 1064
1030 old_key = hci_find_link_key(hdev, bdaddr); 1065 old_key = hci_find_link_key(hdev, bdaddr);
1031 if (old_key) { 1066 if (old_key) {
1032 old_key_type = old_key->type; 1067 old_key_type = old_key->type;
1033 key = old_key; 1068 key = old_key;
1034 } else { 1069 } else {
1035 old_key_type = 0xff; 1070 old_key_type = conn ? conn->key_type : 0xff;
1036 key = kzalloc(sizeof(*key), GFP_ATOMIC); 1071 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1037 if (!key) 1072 if (!key)
1038 return -ENOMEM; 1073 return -ENOMEM;
@@ -1041,16 +1076,37 @@ int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1041 1076
1042 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type); 1077 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1043 1078
1079 /* Some buggy controller combinations generate a changed
1080 * combination key for legacy pairing even when there's no
1081 * previous key */
1082 if (type == HCI_LK_CHANGED_COMBINATION &&
1083 (!conn || conn->remote_auth == 0xff) &&
1084 old_key_type == 0xff) {
1085 type = HCI_LK_COMBINATION;
1086 if (conn)
1087 conn->key_type = type;
1088 }
1089
1044 bacpy(&key->bdaddr, bdaddr); 1090 bacpy(&key->bdaddr, bdaddr);
1045 memcpy(key->val, val, 16); 1091 memcpy(key->val, val, 16);
1046 key->type = type;
1047 key->pin_len = pin_len; 1092 key->pin_len = pin_len;
1048 1093
1049 if (new_key) 1094 if (type == HCI_LK_CHANGED_COMBINATION)
1050 mgmt_new_key(hdev->id, key, old_key_type);
1051
1052 if (type == 0x06)
1053 key->type = old_key_type; 1095 key->type = old_key_type;
1096 else
1097 key->type = type;
1098
1099 if (!new_key)
1100 return 0;
1101
1102 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1103
1104 mgmt_new_key(hdev->id, key, persistent);
1105
1106 if (!persistent) {
1107 list_del(&key->list);
1108 kfree(key);
1109 }
1054 1110
1055 return 0; 1111 return 0;
1056} 1112}
@@ -1082,6 +1138,70 @@ static void hci_cmd_timer(unsigned long arg)
1082 tasklet_schedule(&hdev->cmd_task); 1138 tasklet_schedule(&hdev->cmd_task);
1083} 1139}
1084 1140
1141struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr)
1143{
1144 struct oob_data *data;
1145
1146 list_for_each_entry(data, &hdev->remote_oob_data, list)
1147 if (bacmp(bdaddr, &data->bdaddr) == 0)
1148 return data;
1149
1150 return NULL;
1151}
1152
1153int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1154{
1155 struct oob_data *data;
1156
1157 data = hci_find_remote_oob_data(hdev, bdaddr);
1158 if (!data)
1159 return -ENOENT;
1160
1161 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1162
1163 list_del(&data->list);
1164 kfree(data);
1165
1166 return 0;
1167}
1168
1169int hci_remote_oob_data_clear(struct hci_dev *hdev)
1170{
1171 struct oob_data *data, *n;
1172
1173 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1174 list_del(&data->list);
1175 kfree(data);
1176 }
1177
1178 return 0;
1179}
1180
1181int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1182 u8 *randomizer)
1183{
1184 struct oob_data *data;
1185
1186 data = hci_find_remote_oob_data(hdev, bdaddr);
1187
1188 if (!data) {
1189 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1190 if (!data)
1191 return -ENOMEM;
1192
1193 bacpy(&data->bdaddr, bdaddr);
1194 list_add(&data->list, &hdev->remote_oob_data);
1195 }
1196
1197 memcpy(data->hash, hash, sizeof(data->hash));
1198 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1199
1200 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1201
1202 return 0;
1203}
1204
1085/* Register HCI device */ 1205/* Register HCI device */
1086int hci_register_dev(struct hci_dev *hdev) 1206int hci_register_dev(struct hci_dev *hdev)
1087{ 1207{
@@ -1146,6 +1266,8 @@ int hci_register_dev(struct hci_dev *hdev)
1146 1266
1147 INIT_LIST_HEAD(&hdev->link_keys); 1267 INIT_LIST_HEAD(&hdev->link_keys);
1148 1268
1269 INIT_LIST_HEAD(&hdev->remote_oob_data);
1270
1149 INIT_WORK(&hdev->power_on, hci_power_on); 1271 INIT_WORK(&hdev->power_on, hci_power_on);
1150 INIT_WORK(&hdev->power_off, hci_power_off); 1272 INIT_WORK(&hdev->power_off, hci_power_off);
1151 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); 1273 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1225,6 +1347,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1225 hci_blacklist_clear(hdev); 1347 hci_blacklist_clear(hdev);
1226 hci_uuids_clear(hdev); 1348 hci_uuids_clear(hdev);
1227 hci_link_keys_clear(hdev); 1349 hci_link_keys_clear(hdev);
1350 hci_remote_oob_data_clear(hdev);
1228 hci_dev_unlock_bh(hdev); 1351 hci_dev_unlock_bh(hdev);
1229 1352
1230 __hci_dev_put(hdev); 1353 __hci_dev_put(hdev);
@@ -1274,7 +1397,7 @@ int hci_recv_frame(struct sk_buff *skb)
1274EXPORT_SYMBOL(hci_recv_frame); 1397EXPORT_SYMBOL(hci_recv_frame);
1275 1398
1276static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1399static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1277 int count, __u8 index, gfp_t gfp_mask) 1400 int count, __u8 index)
1278{ 1401{
1279 int len = 0; 1402 int len = 0;
1280 int hlen = 0; 1403 int hlen = 0;
@@ -1304,7 +1427,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1304 break; 1427 break;
1305 } 1428 }
1306 1429
1307 skb = bt_skb_alloc(len, gfp_mask); 1430 skb = bt_skb_alloc(len, GFP_ATOMIC);
1308 if (!skb) 1431 if (!skb)
1309 return -ENOMEM; 1432 return -ENOMEM;
1310 1433
@@ -1390,8 +1513,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1390 return -EILSEQ; 1513 return -EILSEQ;
1391 1514
1392 while (count) { 1515 while (count) {
1393 rem = hci_reassembly(hdev, type, data, count, 1516 rem = hci_reassembly(hdev, type, data, count, type - 1);
1394 type - 1, GFP_ATOMIC);
1395 if (rem < 0) 1517 if (rem < 0)
1396 return rem; 1518 return rem;
1397 1519
@@ -1425,8 +1547,8 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1425 } else 1547 } else
1426 type = bt_cb(skb)->pkt_type; 1548 type = bt_cb(skb)->pkt_type;
1427 1549
1428 rem = hci_reassembly(hdev, type, data, 1550 rem = hci_reassembly(hdev, type, data, count,
1429 count, STREAM_REASSEMBLY, GFP_ATOMIC); 1551 STREAM_REASSEMBLY);
1430 if (rem < 0) 1552 if (rem < 0)
1431 return rem; 1553 return rem;
1432 1554
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b2570159a044..d5aa97ee6ffa 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -56,7 +56,9 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56 if (status) 56 if (status)
57 return; 57 return;
58 58
59 clear_bit(HCI_INQUIRY, &hdev->flags); 59 if (test_bit(HCI_MGMT, &hdev->flags) &&
60 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
61 mgmt_discovering(hdev->id, 0);
60 62
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 63 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62 64
@@ -72,7 +74,9 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 if (status) 74 if (status)
73 return; 75 return;
74 76
75 clear_bit(HCI_INQUIRY, &hdev->flags); 77 if (test_bit(HCI_MGMT, &hdev->flags) &&
78 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
79 mgmt_discovering(hdev->id, 0);
76 80
77 hci_conn_check_pending(hdev); 81 hci_conn_check_pending(hdev);
78} 82}
@@ -195,14 +199,17 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
195 199
196 BT_DBG("%s status 0x%x", hdev->name, status); 200 BT_DBG("%s status 0x%x", hdev->name, status);
197 201
198 if (status)
199 return;
200
201 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
202 if (!sent) 203 if (!sent)
203 return; 204 return;
204 205
205 memcpy(hdev->dev_name, sent, 248); 206 if (test_bit(HCI_MGMT, &hdev->flags))
207 mgmt_set_local_name_complete(hdev->id, sent, status);
208
209 if (status)
210 return;
211
212 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
206} 213}
207 214
208static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 215static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -214,7 +221,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
214 if (rp->status) 221 if (rp->status)
215 return; 222 return;
216 223
217 memcpy(hdev->dev_name, rp->name, 248); 224 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
218} 225}
219 226
220static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 227static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -821,16 +828,31 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
821 rp->status); 828 rp->status);
822} 829}
823 830
831static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
832 struct sk_buff *skb)
833{
834 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
835
836 BT_DBG("%s status 0x%x", hdev->name, rp->status);
837
838 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
839 rp->randomizer, rp->status);
840}
841
824static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 842static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
825{ 843{
826 BT_DBG("%s status 0x%x", hdev->name, status); 844 BT_DBG("%s status 0x%x", hdev->name, status);
827 845
828 if (status) { 846 if (status) {
829 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 847 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
830
831 hci_conn_check_pending(hdev); 848 hci_conn_check_pending(hdev);
832 } else 849 return;
833 set_bit(HCI_INQUIRY, &hdev->flags); 850 }
851
852 if (test_bit(HCI_MGMT, &hdev->flags) &&
853 !test_and_set_bit(HCI_INQUIRY,
854 &hdev->flags))
855 mgmt_discovering(hdev->id, 1);
834} 856}
835 857
836static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 858static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -999,12 +1021,19 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
999 hci_dev_lock(hdev); 1021 hci_dev_lock(hdev);
1000 1022
1001 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1002 if (conn && hci_outgoing_auth_needed(hdev, conn)) { 1024 if (!conn)
1025 goto unlock;
1026
1027 if (!hci_outgoing_auth_needed(hdev, conn))
1028 goto unlock;
1029
1030 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1003 struct hci_cp_auth_requested cp; 1031 struct hci_cp_auth_requested cp;
1004 cp.handle = __cpu_to_le16(conn->handle); 1032 cp.handle = __cpu_to_le16(conn->handle);
1005 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1033 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1006 } 1034 }
1007 1035
1036unlock:
1008 hci_dev_unlock(hdev); 1037 hci_dev_unlock(hdev);
1009} 1038}
1010 1039
@@ -1194,7 +1223,9 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
1194 1223
1195 BT_DBG("%s status %d", hdev->name, status); 1224 BT_DBG("%s status %d", hdev->name, status);
1196 1225
1197 clear_bit(HCI_INQUIRY, &hdev->flags); 1226 if (test_bit(HCI_MGMT, &hdev->flags) &&
1227 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1228 mgmt_discovering(hdev->id, 0);
1198 1229
1199 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1230 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1200 1231
@@ -1214,7 +1245,13 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1214 1245
1215 hci_dev_lock(hdev); 1246 hci_dev_lock(hdev);
1216 1247
1217 for (; num_rsp; num_rsp--) { 1248 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1249
1250 if (test_bit(HCI_MGMT, &hdev->flags))
1251 mgmt_discovering(hdev->id, 1);
1252 }
1253
1254 for (; num_rsp; num_rsp--, info++) {
1218 bacpy(&data.bdaddr, &info->bdaddr); 1255 bacpy(&data.bdaddr, &info->bdaddr);
1219 data.pscan_rep_mode = info->pscan_rep_mode; 1256 data.pscan_rep_mode = info->pscan_rep_mode;
1220 data.pscan_period_mode = info->pscan_period_mode; 1257 data.pscan_period_mode = info->pscan_period_mode;
@@ -1223,8 +1260,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1223 data.clock_offset = info->clock_offset; 1260 data.clock_offset = info->clock_offset;
1224 data.rssi = 0x00; 1261 data.rssi = 0x00;
1225 data.ssp_mode = 0x00; 1262 data.ssp_mode = 0x00;
1226 info++;
1227 hci_inquiry_cache_update(hdev, &data); 1263 hci_inquiry_cache_update(hdev, &data);
1264 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1265 NULL);
1228 } 1266 }
1229 1267
1230 hci_dev_unlock(hdev); 1268 hci_dev_unlock(hdev);
@@ -1428,7 +1466,6 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1428 conn->sec_level = conn->pending_sec_level; 1466 conn->sec_level = conn->pending_sec_level;
1429 } else { 1467 } else {
1430 mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 1468 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1431 conn->sec_level = BT_SECURITY_LOW;
1432 } 1469 }
1433 1470
1434 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1471 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1482,13 +1519,23 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
1482 1519
1483 hci_dev_lock(hdev); 1520 hci_dev_lock(hdev);
1484 1521
1522 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1523 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1524
1485 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 1525 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1486 if (conn && hci_outgoing_auth_needed(hdev, conn)) { 1526 if (!conn)
1527 goto unlock;
1528
1529 if (!hci_outgoing_auth_needed(hdev, conn))
1530 goto unlock;
1531
1532 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1487 struct hci_cp_auth_requested cp; 1533 struct hci_cp_auth_requested cp;
1488 cp.handle = __cpu_to_le16(conn->handle); 1534 cp.handle = __cpu_to_le16(conn->handle);
1489 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1535 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1490 } 1536 }
1491 1537
1538unlock:
1492 hci_dev_unlock(hdev); 1539 hci_dev_unlock(hdev);
1493} 1540}
1494 1541
@@ -1751,6 +1798,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1751 hci_cc_pin_code_neg_reply(hdev, skb); 1798 hci_cc_pin_code_neg_reply(hdev, skb);
1752 break; 1799 break;
1753 1800
1801 case HCI_OP_READ_LOCAL_OOB_DATA:
1802 hci_cc_read_local_oob_data_reply(hdev, skb);
1803 break;
1804
1754 case HCI_OP_LE_READ_BUFFER_SIZE: 1805 case HCI_OP_LE_READ_BUFFER_SIZE:
1755 hci_cc_le_read_buffer_size(hdev, skb); 1806 hci_cc_le_read_buffer_size(hdev, skb);
1756 break; 1807 break;
@@ -1984,9 +2035,16 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1984 if (!test_bit(HCI_PAIRABLE, &hdev->flags)) 2035 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1985 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2036 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1986 sizeof(ev->bdaddr), &ev->bdaddr); 2037 sizeof(ev->bdaddr), &ev->bdaddr);
2038 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2039 u8 secure;
1987 2040
1988 if (test_bit(HCI_MGMT, &hdev->flags)) 2041 if (conn->pending_sec_level == BT_SECURITY_HIGH)
1989 mgmt_pin_code_request(hdev->id, &ev->bdaddr); 2042 secure = 1;
2043 else
2044 secure = 0;
2045
2046 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2047 }
1990 2048
1991 hci_dev_unlock(hdev); 2049 hci_dev_unlock(hdev);
1992} 2050}
@@ -2015,17 +2073,30 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2015 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2073 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2016 batostr(&ev->bdaddr)); 2074 batostr(&ev->bdaddr));
2017 2075
2018 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) { 2076 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2077 key->type == HCI_LK_DEBUG_COMBINATION) {
2019 BT_DBG("%s ignoring debug key", hdev->name); 2078 BT_DBG("%s ignoring debug key", hdev->name);
2020 goto not_found; 2079 goto not_found;
2021 } 2080 }
2022 2081
2023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2082 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2083 if (conn) {
2084 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2085 conn->auth_type != 0xff &&
2086 (conn->auth_type & 0x01)) {
2087 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2088 goto not_found;
2089 }
2024 2090
2025 if (key->type == 0x04 && conn && conn->auth_type != 0xff && 2091 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2026 (conn->auth_type & 0x01)) { 2092 conn->pending_sec_level == BT_SECURITY_HIGH) {
2027 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2093 BT_DBG("%s ignoring key unauthenticated for high \
2028 goto not_found; 2094 security", hdev->name);
2095 goto not_found;
2096 }
2097
2098 conn->key_type = key->type;
2099 conn->pin_length = key->pin_len;
2029 } 2100 }
2030 2101
2031 bacpy(&cp.bdaddr, &ev->bdaddr); 2102 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -2057,11 +2128,15 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2057 hci_conn_hold(conn); 2128 hci_conn_hold(conn);
2058 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2129 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2059 pin_len = conn->pin_length; 2130 pin_len = conn->pin_length;
2131
2132 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2133 conn->key_type = ev->key_type;
2134
2060 hci_conn_put(conn); 2135 hci_conn_put(conn);
2061 } 2136 }
2062 2137
2063 if (test_bit(HCI_LINK_KEYS, &hdev->flags)) 2138 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2064 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key, 2139 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2065 ev->key_type, pin_len); 2140 ev->key_type, pin_len);
2066 2141
2067 hci_dev_unlock(hdev); 2142 hci_dev_unlock(hdev);
@@ -2136,11 +2211,17 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2136 2211
2137 hci_dev_lock(hdev); 2212 hci_dev_lock(hdev);
2138 2213
2214 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2215
2216 if (test_bit(HCI_MGMT, &hdev->flags))
2217 mgmt_discovering(hdev->id, 1);
2218 }
2219
2139 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2220 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2140 struct inquiry_info_with_rssi_and_pscan_mode *info; 2221 struct inquiry_info_with_rssi_and_pscan_mode *info;
2141 info = (void *) (skb->data + 1); 2222 info = (void *) (skb->data + 1);
2142 2223
2143 for (; num_rsp; num_rsp--) { 2224 for (; num_rsp; num_rsp--, info++) {
2144 bacpy(&data.bdaddr, &info->bdaddr); 2225 bacpy(&data.bdaddr, &info->bdaddr);
2145 data.pscan_rep_mode = info->pscan_rep_mode; 2226 data.pscan_rep_mode = info->pscan_rep_mode;
2146 data.pscan_period_mode = info->pscan_period_mode; 2227 data.pscan_period_mode = info->pscan_period_mode;
@@ -2149,13 +2230,15 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2149 data.clock_offset = info->clock_offset; 2230 data.clock_offset = info->clock_offset;
2150 data.rssi = info->rssi; 2231 data.rssi = info->rssi;
2151 data.ssp_mode = 0x00; 2232 data.ssp_mode = 0x00;
2152 info++;
2153 hci_inquiry_cache_update(hdev, &data); 2233 hci_inquiry_cache_update(hdev, &data);
2234 mgmt_device_found(hdev->id, &info->bdaddr,
2235 info->dev_class, info->rssi,
2236 NULL);
2154 } 2237 }
2155 } else { 2238 } else {
2156 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2239 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2157 2240
2158 for (; num_rsp; num_rsp--) { 2241 for (; num_rsp; num_rsp--, info++) {
2159 bacpy(&data.bdaddr, &info->bdaddr); 2242 bacpy(&data.bdaddr, &info->bdaddr);
2160 data.pscan_rep_mode = info->pscan_rep_mode; 2243 data.pscan_rep_mode = info->pscan_rep_mode;
2161 data.pscan_period_mode = info->pscan_period_mode; 2244 data.pscan_period_mode = info->pscan_period_mode;
@@ -2164,8 +2247,10 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2164 data.clock_offset = info->clock_offset; 2247 data.clock_offset = info->clock_offset;
2165 data.rssi = info->rssi; 2248 data.rssi = info->rssi;
2166 data.ssp_mode = 0x00; 2249 data.ssp_mode = 0x00;
2167 info++;
2168 hci_inquiry_cache_update(hdev, &data); 2250 hci_inquiry_cache_update(hdev, &data);
2251 mgmt_device_found(hdev->id, &info->bdaddr,
2252 info->dev_class, info->rssi,
2253 NULL);
2169 } 2254 }
2170 } 2255 }
2171 2256
@@ -2294,9 +2379,15 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2294 if (!num_rsp) 2379 if (!num_rsp)
2295 return; 2380 return;
2296 2381
2382 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2383
2384 if (test_bit(HCI_MGMT, &hdev->flags))
2385 mgmt_discovering(hdev->id, 1);
2386 }
2387
2297 hci_dev_lock(hdev); 2388 hci_dev_lock(hdev);
2298 2389
2299 for (; num_rsp; num_rsp--) { 2390 for (; num_rsp; num_rsp--, info++) {
2300 bacpy(&data.bdaddr, &info->bdaddr); 2391 bacpy(&data.bdaddr, &info->bdaddr);
2301 data.pscan_rep_mode = info->pscan_rep_mode; 2392 data.pscan_rep_mode = info->pscan_rep_mode;
2302 data.pscan_period_mode = info->pscan_period_mode; 2393 data.pscan_period_mode = info->pscan_period_mode;
@@ -2305,8 +2396,9 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2305 data.clock_offset = info->clock_offset; 2396 data.clock_offset = info->clock_offset;
2306 data.rssi = info->rssi; 2397 data.rssi = info->rssi;
2307 data.ssp_mode = 0x01; 2398 data.ssp_mode = 0x01;
2308 info++;
2309 hci_inquiry_cache_update(hdev, &data); 2399 hci_inquiry_cache_update(hdev, &data);
2400 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2401 info->rssi, info->data);
2310 } 2402 }
2311 2403
2312 hci_dev_unlock(hdev); 2404 hci_dev_unlock(hdev);
@@ -2326,7 +2418,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
2326 2418
2327 /* If remote requests no-bonding follow that lead */ 2419 /* If remote requests no-bonding follow that lead */
2328 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01) 2420 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2329 return 0x00; 2421 return conn->remote_auth | (conn->auth_type & 0x01);
2330 2422
2331 return conn->auth_type; 2423 return conn->auth_type;
2332} 2424}
@@ -2355,8 +2447,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2355 2447
2356 bacpy(&cp.bdaddr, &ev->bdaddr); 2448 bacpy(&cp.bdaddr, &ev->bdaddr);
2357 cp.capability = conn->io_capability; 2449 cp.capability = conn->io_capability;
2358 cp.oob_data = 0; 2450 conn->auth_type = hci_get_auth_req(conn);
2359 cp.authentication = hci_get_auth_req(conn); 2451 cp.authentication = conn->auth_type;
2452
2453 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2454 hci_find_remote_oob_data(hdev, &conn->dst))
2455 cp.oob_data = 0x01;
2456 else
2457 cp.oob_data = 0x00;
2360 2458
2361 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 2459 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2362 sizeof(cp), &cp); 2460 sizeof(cp), &cp);
@@ -2364,7 +2462,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2364 struct hci_cp_io_capability_neg_reply cp; 2462 struct hci_cp_io_capability_neg_reply cp;
2365 2463
2366 bacpy(&cp.bdaddr, &ev->bdaddr); 2464 bacpy(&cp.bdaddr, &ev->bdaddr);
2367 cp.reason = 0x16; /* Pairing not allowed */ 2465 cp.reason = 0x18; /* Pairing not allowed */
2368 2466
2369 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 2467 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2370 sizeof(cp), &cp); 2468 sizeof(cp), &cp);
@@ -2399,14 +2497,67 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2399 struct sk_buff *skb) 2497 struct sk_buff *skb)
2400{ 2498{
2401 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 2499 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2500 int loc_mitm, rem_mitm, confirm_hint = 0;
2501 struct hci_conn *conn;
2402 2502
2403 BT_DBG("%s", hdev->name); 2503 BT_DBG("%s", hdev->name);
2404 2504
2405 hci_dev_lock(hdev); 2505 hci_dev_lock(hdev);
2406 2506
2407 if (test_bit(HCI_MGMT, &hdev->flags)) 2507 if (!test_bit(HCI_MGMT, &hdev->flags))
2408 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey); 2508 goto unlock;
2509
2510 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2511 if (!conn)
2512 goto unlock;
2513
2514 loc_mitm = (conn->auth_type & 0x01);
2515 rem_mitm = (conn->remote_auth & 0x01);
2516
2517 /* If we require MITM but the remote device can't provide that
2518 * (it has NoInputNoOutput) then reject the confirmation
2519 * request. The only exception is when we're dedicated bonding
2520 * initiators (connect_cfm_cb set) since then we always have the MITM
2521 * bit set. */
2522 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2523 BT_DBG("Rejecting request: remote device can't provide MITM");
2524 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2525 sizeof(ev->bdaddr), &ev->bdaddr);
2526 goto unlock;
2527 }
2528
2529 /* If no side requires MITM protection; auto-accept */
2530 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2531 (!rem_mitm || conn->io_capability == 0x03)) {
2532
2533 /* If we're not the initiators request authorization to
2534 * proceed from user space (mgmt_user_confirm with
2535 * confirm_hint set to 1). */
2536 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2537 BT_DBG("Confirming auto-accept as acceptor");
2538 confirm_hint = 1;
2539 goto confirm;
2540 }
2541
2542 BT_DBG("Auto-accept of user confirmation with %ums delay",
2543 hdev->auto_accept_delay);
2544
2545 if (hdev->auto_accept_delay > 0) {
2546 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2547 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2548 goto unlock;
2549 }
2550
2551 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2552 sizeof(ev->bdaddr), &ev->bdaddr);
2553 goto unlock;
2554 }
2409 2555
2556confirm:
2557 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
2558 confirm_hint);
2559
2560unlock:
2410 hci_dev_unlock(hdev); 2561 hci_dev_unlock(hdev);
2411} 2562}
2412 2563
@@ -2453,6 +2604,41 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
2453 hci_dev_unlock(hdev); 2604 hci_dev_unlock(hdev);
2454} 2605}
2455 2606
2607static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2608 struct sk_buff *skb)
2609{
2610 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2611 struct oob_data *data;
2612
2613 BT_DBG("%s", hdev->name);
2614
2615 hci_dev_lock(hdev);
2616
2617 if (!test_bit(HCI_MGMT, &hdev->flags))
2618 goto unlock;
2619
2620 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2621 if (data) {
2622 struct hci_cp_remote_oob_data_reply cp;
2623
2624 bacpy(&cp.bdaddr, &ev->bdaddr);
2625 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2626 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2627
2628 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2629 &cp);
2630 } else {
2631 struct hci_cp_remote_oob_data_neg_reply cp;
2632
2633 bacpy(&cp.bdaddr, &ev->bdaddr);
2634 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2635 &cp);
2636 }
2637
2638unlock:
2639 hci_dev_unlock(hdev);
2640}
2641
2456static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2642static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457{ 2643{
2458 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 2644 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
@@ -2655,6 +2841,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2655 hci_le_meta_evt(hdev, skb); 2841 hci_le_meta_evt(hdev, skb);
2656 break; 2842 break;
2657 2843
2844 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
2845 hci_remote_oob_data_request_evt(hdev, skb);
2846 break;
2847
2658 default: 2848 default:
2659 BT_DBG("%s event 0x%x", hdev->name, event); 2849 BT_DBG("%s event 0x%x", hdev->name, event);
2660 break; 2850 break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3c838a65a75a..a6c3aa8be1f7 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -216,13 +216,13 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
216static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 216static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
217{ 217{
218 struct hci_dev *hdev = dev_get_drvdata(dev); 218 struct hci_dev *hdev = dev_get_drvdata(dev);
219 char name[249]; 219 char name[HCI_MAX_NAME_LENGTH + 1];
220 int i; 220 int i;
221 221
222 for (i = 0; i < 248; i++) 222 for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
223 name[i] = hdev->dev_name[i]; 223 name[i] = hdev->dev_name[i];
224 224
225 name[248] = '\0'; 225 name[HCI_MAX_NAME_LENGTH] = '\0';
226 return sprintf(buf, "%s\n", name); 226 return sprintf(buf, "%s\n", name);
227} 227}
228 228
@@ -277,10 +277,12 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
277static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 277static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
278{ 278{
279 struct hci_dev *hdev = dev_get_drvdata(dev); 279 struct hci_dev *hdev = dev_get_drvdata(dev);
280 unsigned long val; 280 unsigned int val;
281 int rv;
281 282
282 if (strict_strtoul(buf, 0, &val) < 0) 283 rv = kstrtouint(buf, 0, &val);
283 return -EINVAL; 284 if (rv < 0)
285 return rv;
284 286
285 if (val != 0 && (val < 500 || val > 3600000)) 287 if (val != 0 && (val < 500 || val > 3600000))
286 return -EINVAL; 288 return -EINVAL;
@@ -299,15 +301,14 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
299static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 301static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
300{ 302{
301 struct hci_dev *hdev = dev_get_drvdata(dev); 303 struct hci_dev *hdev = dev_get_drvdata(dev);
302 unsigned long val; 304 u16 val;
303 305 int rv;
304 if (strict_strtoul(buf, 0, &val) < 0)
305 return -EINVAL;
306 306
307 if (val < 0x0002 || val > 0xFFFE || val % 2) 307 rv = kstrtou16(buf, 0, &val);
308 return -EINVAL; 308 if (rv < 0)
309 return rv;
309 310
310 if (val < hdev->sniff_min_interval) 311 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
311 return -EINVAL; 312 return -EINVAL;
312 313
313 hdev->sniff_max_interval = val; 314 hdev->sniff_max_interval = val;
@@ -324,15 +325,14 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
324static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 325static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
325{ 326{
326 struct hci_dev *hdev = dev_get_drvdata(dev); 327 struct hci_dev *hdev = dev_get_drvdata(dev);
327 unsigned long val; 328 u16 val;
329 int rv;
328 330
329 if (strict_strtoul(buf, 0, &val) < 0) 331 rv = kstrtou16(buf, 0, &val);
330 return -EINVAL; 332 if (rv < 0)
331 333 return rv;
332 if (val < 0x0002 || val > 0xFFFE || val % 2)
333 return -EINVAL;
334 334
335 if (val > hdev->sniff_max_interval) 335 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 hdev->sniff_min_interval = val; 338 hdev->sniff_min_interval = val;
@@ -511,6 +511,35 @@ static const struct file_operations uuids_fops = {
511 .release = single_release, 511 .release = single_release,
512}; 512};
513 513
514static int auto_accept_delay_set(void *data, u64 val)
515{
516 struct hci_dev *hdev = data;
517
518 hci_dev_lock_bh(hdev);
519
520 hdev->auto_accept_delay = val;
521
522 hci_dev_unlock_bh(hdev);
523
524 return 0;
525}
526
527static int auto_accept_delay_get(void *data, u64 *val)
528{
529 struct hci_dev *hdev = data;
530
531 hci_dev_lock_bh(hdev);
532
533 *val = hdev->auto_accept_delay;
534
535 hci_dev_unlock_bh(hdev);
536
537 return 0;
538}
539
540DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
541 auto_accept_delay_set, "%llu\n");
542
514int hci_register_sysfs(struct hci_dev *hdev) 543int hci_register_sysfs(struct hci_dev *hdev)
515{ 544{
516 struct device *dev = &hdev->dev; 545 struct device *dev = &hdev->dev;
@@ -545,6 +574,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
545 574
546 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 575 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
547 576
577 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
578 &auto_accept_delay_fops);
548 return 0; 579 return 0;
549} 580}
550 581
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5ec12971af6b..c405a954a603 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/kthread.h>
40#include <net/sock.h> 41#include <net/sock.h>
41 42
42#include <linux/input.h> 43#include <linux/input.h>
@@ -55,22 +56,24 @@ static DECLARE_RWSEM(hidp_session_sem);
55static LIST_HEAD(hidp_session_list); 56static LIST_HEAD(hidp_session_list);
56 57
57static unsigned char hidp_keycode[256] = { 58static unsigned char hidp_keycode[256] = {
58 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 59 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36,
59 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 60 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45,
60 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 61 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1,
61 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 62 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52,
62 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, 63 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88,
63 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 64 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69,
64 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, 65 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73,
65 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, 66 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190,
66 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, 67 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135,
67 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94,
68 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0,
69 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 72 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
72 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, 73 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
73 150,158,159,128,136,177,178,176,142,152,173,140 74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
75 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115,
76 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140
74}; 77};
75 78
76static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; 79static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
@@ -461,8 +464,7 @@ static void hidp_idle_timeout(unsigned long arg)
461{ 464{
462 struct hidp_session *session = (struct hidp_session *) arg; 465 struct hidp_session *session = (struct hidp_session *) arg;
463 466
464 atomic_inc(&session->terminate); 467 kthread_stop(session->task);
465 hidp_schedule(session);
466} 468}
467 469
468static void hidp_set_timer(struct hidp_session *session) 470static void hidp_set_timer(struct hidp_session *session)
@@ -533,9 +535,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
533 skb_queue_purge(&session->ctrl_transmit); 535 skb_queue_purge(&session->ctrl_transmit);
534 skb_queue_purge(&session->intr_transmit); 536 skb_queue_purge(&session->intr_transmit);
535 537
536 /* Kill session thread */ 538 kthread_stop(session->task);
537 atomic_inc(&session->terminate);
538 hidp_schedule(session);
539 } 539 }
540} 540}
541 541
@@ -694,22 +694,10 @@ static int hidp_session(void *arg)
694 struct sock *ctrl_sk = session->ctrl_sock->sk; 694 struct sock *ctrl_sk = session->ctrl_sock->sk;
695 struct sock *intr_sk = session->intr_sock->sk; 695 struct sock *intr_sk = session->intr_sock->sk;
696 struct sk_buff *skb; 696 struct sk_buff *skb;
697 int vendor = 0x0000, product = 0x0000;
698 wait_queue_t ctrl_wait, intr_wait; 697 wait_queue_t ctrl_wait, intr_wait;
699 698
700 BT_DBG("session %p", session); 699 BT_DBG("session %p", session);
701 700
702 if (session->input) {
703 vendor = session->input->id.vendor;
704 product = session->input->id.product;
705 }
706
707 if (session->hid) {
708 vendor = session->hid->vendor;
709 product = session->hid->product;
710 }
711
712 daemonize("khidpd_%04x%04x", vendor, product);
713 set_user_nice(current, -15); 701 set_user_nice(current, -15);
714 702
715 init_waitqueue_entry(&ctrl_wait, current); 703 init_waitqueue_entry(&ctrl_wait, current);
@@ -718,10 +706,11 @@ static int hidp_session(void *arg)
718 add_wait_queue(sk_sleep(intr_sk), &intr_wait); 706 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
719 session->waiting_for_startup = 0; 707 session->waiting_for_startup = 0;
720 wake_up_interruptible(&session->startup_queue); 708 wake_up_interruptible(&session->startup_queue);
721 while (!atomic_read(&session->terminate)) { 709 while (!kthread_should_stop()) {
722 set_current_state(TASK_INTERRUPTIBLE); 710 set_current_state(TASK_INTERRUPTIBLE);
723 711
724 if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) 712 if (ctrl_sk->sk_state != BT_CONNECTED ||
713 intr_sk->sk_state != BT_CONNECTED)
725 break; 714 break;
726 715
727 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 716 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
@@ -965,6 +954,7 @@ fault:
965int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) 954int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
966{ 955{
967 struct hidp_session *session, *s; 956 struct hidp_session *session, *s;
957 int vendor, product;
968 int err; 958 int err;
969 959
970 BT_DBG(""); 960 BT_DBG("");
@@ -989,8 +979,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
989 979
990 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); 980 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
991 981
992 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); 982 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
993 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); 983 l2cap_pi(ctrl_sock->sk)->chan->imtu);
984 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
985 l2cap_pi(intr_sock->sk)->chan->imtu);
994 986
995 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); 987 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
996 988
@@ -1026,9 +1018,24 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1026 1018
1027 hidp_set_timer(session); 1019 hidp_set_timer(session);
1028 1020
1029 err = kernel_thread(hidp_session, session, CLONE_KERNEL); 1021 if (session->hid) {
1030 if (err < 0) 1022 vendor = session->hid->vendor;
1023 product = session->hid->product;
1024 } else if (session->input) {
1025 vendor = session->input->id.vendor;
1026 product = session->input->id.product;
1027 } else {
1028 vendor = 0x0000;
1029 product = 0x0000;
1030 }
1031
1032 session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
1033 vendor, product);
1034 if (IS_ERR(session->task)) {
1035 err = PTR_ERR(session->task);
1031 goto unlink; 1036 goto unlink;
1037 }
1038
1032 while (session->waiting_for_startup) { 1039 while (session->waiting_for_startup) {
1033 wait_event_interruptible(session->startup_queue, 1040 wait_event_interruptible(session->startup_queue,
1034 !session->waiting_for_startup); 1041 !session->waiting_for_startup);
@@ -1053,8 +1060,7 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1053err_add_device: 1060err_add_device:
1054 hid_destroy_device(session->hid); 1061 hid_destroy_device(session->hid);
1055 session->hid = NULL; 1062 session->hid = NULL;
1056 atomic_inc(&session->terminate); 1063 kthread_stop(session->task);
1057 hidp_schedule(session);
1058 1064
1059unlink: 1065unlink:
1060 hidp_del_timer(session); 1066 hidp_del_timer(session);
@@ -1105,13 +1111,7 @@ int hidp_del_connection(struct hidp_conndel_req *req)
1105 skb_queue_purge(&session->ctrl_transmit); 1111 skb_queue_purge(&session->ctrl_transmit);
1106 skb_queue_purge(&session->intr_transmit); 1112 skb_queue_purge(&session->intr_transmit);
1107 1113
1108 /* Wakeup user-space polling for socket errors */ 1114 kthread_stop(session->task);
1109 session->intr_sock->sk->sk_err = EUNATCH;
1110 session->ctrl_sock->sk->sk_err = EUNATCH;
1111
1112 /* Kill session thread */
1113 atomic_inc(&session->terminate);
1114 hidp_schedule(session);
1115 } 1115 }
1116 } else 1116 } else
1117 err = -ENOENT; 1117 err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 13de5fa03480..12822cde4b49 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -84,8 +84,8 @@
84#define HIDP_WAITING_FOR_SEND_ACK 11 84#define HIDP_WAITING_FOR_SEND_ACK 11
85 85
86struct hidp_connadd_req { 86struct hidp_connadd_req {
87 int ctrl_sock; // Connected control socket 87 int ctrl_sock; /* Connected control socket */
88 int intr_sock; // Connteted interrupt socket 88 int intr_sock; /* Connected interrupt socket */
89 __u16 parser; 89 __u16 parser;
90 __u16 rd_size; 90 __u16 rd_size;
91 __u8 __user *rd_data; 91 __u8 __user *rd_data;
@@ -142,7 +142,7 @@ struct hidp_session {
142 uint ctrl_mtu; 142 uint ctrl_mtu;
143 uint intr_mtu; 143 uint intr_mtu;
144 144
145 atomic_t terminate; 145 struct task_struct *task;
146 146
147 unsigned char keys[8]; 147 unsigned char keys[8];
148 unsigned char leds; 148 unsigned char leds;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 250dfd46237d..178ac7f127ad 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -85,7 +85,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
85 return err; 85 return err;
86 } 86 }
87 87
88 if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) { 88 if (csock->sk->sk_state != BT_CONNECTED ||
89 isock->sk->sk_state != BT_CONNECTED) {
89 sockfd_put(csock); 90 sockfd_put(csock);
90 sockfd_put(isock); 91 sockfd_put(isock);
91 return -EBADFD; 92 return -EBADFD;
@@ -140,8 +141,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
140 141
141#ifdef CONFIG_COMPAT 142#ifdef CONFIG_COMPAT
142struct compat_hidp_connadd_req { 143struct compat_hidp_connadd_req {
143 int ctrl_sock; // Connected control socket 144 int ctrl_sock; /* Connected control socket */
144 int intr_sock; // Connteted interrupt socket 145 int intr_sock; /* Connected interrupt socket */
145 __u16 parser; 146 __u16 parser;
146 __u16 rd_size; 147 __u16 rd_size;
147 compat_uptr_t rd_data; 148 compat_uptr_t rd_data;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c8dd4494c63..a378acc491ec 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -70,160 +70,160 @@ static void l2cap_busy_work(struct work_struct *work);
70 70
71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data); 72 u8 code, u8 ident, u16 dlen, void *data);
73static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 74
74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 75static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75 76
76/* ---- L2CAP channels ---- */ 77/* ---- L2CAP channels ---- */
77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) 78static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78{ 79{
79 struct sock *s; 80 struct l2cap_chan *c;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 81
81 if (l2cap_pi(s)->dcid == cid) 82 list_for_each_entry(c, &conn->chan_l, list) {
82 break; 83 if (c->dcid == cid)
84 return c;
83 } 85 }
84 return s; 86 return NULL;
87
85} 88}
86 89
87static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) 90static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88{ 91{
89 struct sock *s; 92 struct l2cap_chan *c;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 93
91 if (l2cap_pi(s)->scid == cid) 94 list_for_each_entry(c, &conn->chan_l, list) {
92 break; 95 if (c->scid == cid)
96 return c;
93 } 97 }
94 return s; 98 return NULL;
95} 99}
96 100
97/* Find channel with given SCID. 101/* Find channel with given SCID.
98 * Returns locked socket */ 102 * Returns locked socket */
99static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) 103static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
100{ 104{
101 struct sock *s; 105 struct l2cap_chan *c;
102 read_lock(&l->lock); 106
103 s = __l2cap_get_chan_by_scid(l, cid); 107 read_lock(&conn->chan_lock);
104 if (s) 108 c = __l2cap_get_chan_by_scid(conn, cid);
105 bh_lock_sock(s); 109 if (c)
106 read_unlock(&l->lock); 110 bh_lock_sock(c->sk);
107 return s; 111 read_unlock(&conn->chan_lock);
112 return c;
108} 113}
109 114
110static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) 115static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
111{ 116{
112 struct sock *s; 117 struct l2cap_chan *c;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 118
114 if (l2cap_pi(s)->ident == ident) 119 list_for_each_entry(c, &conn->chan_l, list) {
115 break; 120 if (c->ident == ident)
121 return c;
116 } 122 }
117 return s; 123 return NULL;
118} 124}
119 125
120static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) 126static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
121{ 127{
122 struct sock *s; 128 struct l2cap_chan *c;
123 read_lock(&l->lock); 129
124 s = __l2cap_get_chan_by_ident(l, ident); 130 read_lock(&conn->chan_lock);
125 if (s) 131 c = __l2cap_get_chan_by_ident(conn, ident);
126 bh_lock_sock(s); 132 if (c)
127 read_unlock(&l->lock); 133 bh_lock_sock(c->sk);
128 return s; 134 read_unlock(&conn->chan_lock);
135 return c;
129} 136}
130 137
131static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) 138static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
132{ 139{
133 u16 cid = L2CAP_CID_DYN_START; 140 u16 cid = L2CAP_CID_DYN_START;
134 141
135 for (; cid < L2CAP_CID_DYN_END; cid++) { 142 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid)) 143 if (!__l2cap_get_chan_by_scid(conn, cid))
137 return cid; 144 return cid;
138 } 145 }
139 146
140 return 0; 147 return 0;
141} 148}
142 149
143static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) 150struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
144{ 151{
145 sock_hold(sk); 152 struct l2cap_chan *chan;
153
154 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
155 if (!chan)
156 return NULL;
146 157
147 if (l->head) 158 chan->sk = sk;
148 l2cap_pi(l->head)->prev_c = sk;
149 159
150 l2cap_pi(sk)->next_c = l->head; 160 return chan;
151 l2cap_pi(sk)->prev_c = NULL;
152 l->head = sk;
153} 161}
154 162
155static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) 163void l2cap_chan_free(struct l2cap_chan *chan)
156{ 164{
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; 165 kfree(chan);
158
159 write_lock_bh(&l->lock);
160 if (sk == l->head)
161 l->head = next;
162
163 if (next)
164 l2cap_pi(next)->prev_c = prev;
165 if (prev)
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
168
169 __sock_put(sk);
170} 166}
171 167
172static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) 168static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
173{ 169{
174 struct l2cap_chan_list *l = &conn->chan_list; 170 struct sock *sk = chan->sk;
175 171
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 172 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); 173 chan->psm, chan->dcid);
178 174
179 conn->disc_reason = 0x13; 175 conn->disc_reason = 0x13;
180 176
181 l2cap_pi(sk)->conn = conn; 177 chan->conn = conn;
182 178
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 179 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) { 180 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */ 181 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU; 182 chan->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA; 183 chan->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA; 184 chan->dcid = L2CAP_CID_LE_DATA;
189 } else { 185 } else {
190 /* Alloc CID for connection-oriented socket */ 186 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 187 chan->scid = l2cap_alloc_cid(conn);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 188 chan->omtu = L2CAP_DEFAULT_MTU;
193 } 189 }
194 } else if (sk->sk_type == SOCK_DGRAM) { 190 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */ 191 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; 192 chan->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS; 193 chan->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 194 chan->omtu = L2CAP_DEFAULT_MTU;
199 } else { 195 } else {
200 /* Raw socket can send/recv signalling messages only */ 196 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING; 197 chan->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING; 198 chan->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 199 chan->omtu = L2CAP_DEFAULT_MTU;
204 } 200 }
205 201
206 __l2cap_chan_link(l, sk); 202 sock_hold(sk);
207 203
208 if (parent) 204 list_add(&chan->list, &conn->chan_l);
209 bt_accept_enqueue(parent, sk);
210} 205}
211 206
212/* Delete channel. 207/* Delete channel.
213 * Must be called on the locked socket. */ 208 * Must be called on the locked socket. */
214void l2cap_chan_del(struct sock *sk, int err) 209void l2cap_chan_del(struct l2cap_chan *chan, int err)
215{ 210{
216 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 211 struct sock *sk = chan->sk;
212 struct l2cap_conn *conn = chan->conn;
217 struct sock *parent = bt_sk(sk)->parent; 213 struct sock *parent = bt_sk(sk)->parent;
218 214
219 l2cap_sock_clear_timer(sk); 215 l2cap_sock_clear_timer(sk);
220 216
221 BT_DBG("sk %p, conn %p, err %d", sk, conn, err); 217 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
222 218
223 if (conn) { 219 if (conn) {
224 /* Unlink from channel list */ 220 /* Delete from channel list */
225 l2cap_chan_unlink(&conn->chan_list, sk); 221 write_lock_bh(&conn->chan_lock);
226 l2cap_pi(sk)->conn = NULL; 222 list_del(&chan->list);
223 write_unlock_bh(&conn->chan_lock);
224 __sock_put(sk);
225
226 chan->conn = NULL;
227 hci_conn_put(conn->hcon); 227 hci_conn_put(conn->hcon);
228 } 228 }
229 229
@@ -239,29 +239,35 @@ void l2cap_chan_del(struct sock *sk, int err)
239 } else 239 } else
240 sk->sk_state_change(sk); 240 sk->sk_state_change(sk);
241 241
242 skb_queue_purge(TX_QUEUE(sk)); 242 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
243 chan->conf_state & L2CAP_CONF_INPUT_DONE))
244 return;
245
246 skb_queue_purge(&chan->tx_q);
243 247
244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { 248 if (chan->mode == L2CAP_MODE_ERTM) {
245 struct srej_list *l, *tmp; 249 struct srej_list *l, *tmp;
246 250
247 del_timer(&l2cap_pi(sk)->retrans_timer); 251 del_timer(&chan->retrans_timer);
248 del_timer(&l2cap_pi(sk)->monitor_timer); 252 del_timer(&chan->monitor_timer);
249 del_timer(&l2cap_pi(sk)->ack_timer); 253 del_timer(&chan->ack_timer);
250 254
251 skb_queue_purge(SREJ_QUEUE(sk)); 255 skb_queue_purge(&chan->srej_q);
252 skb_queue_purge(BUSY_QUEUE(sk)); 256 skb_queue_purge(&chan->busy_q);
253 257
254 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { 258 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
255 list_del(&l->list); 259 list_del(&l->list);
256 kfree(l); 260 kfree(l);
257 } 261 }
258 } 262 }
259} 263}
260 264
261static inline u8 l2cap_get_auth_type(struct sock *sk) 265static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
262{ 266{
267 struct sock *sk = chan->sk;
268
263 if (sk->sk_type == SOCK_RAW) { 269 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) { 270 switch (chan->sec_level) {
265 case BT_SECURITY_HIGH: 271 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM; 272 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM: 273 case BT_SECURITY_MEDIUM:
@@ -269,16 +275,16 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
269 default: 275 default:
270 return HCI_AT_NO_BONDING; 276 return HCI_AT_NO_BONDING;
271 } 277 }
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { 278 } else if (chan->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 279 if (chan->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 280 chan->sec_level = BT_SECURITY_SDP;
275 281
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 282 if (chan->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM; 283 return HCI_AT_NO_BONDING_MITM;
278 else 284 else
279 return HCI_AT_NO_BONDING; 285 return HCI_AT_NO_BONDING;
280 } else { 286 } else {
281 switch (l2cap_pi(sk)->sec_level) { 287 switch (chan->sec_level) {
282 case BT_SECURITY_HIGH: 288 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM; 289 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM: 290 case BT_SECURITY_MEDIUM:
@@ -290,15 +296,14 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
290} 296}
291 297
292/* Service level security */ 298/* Service level security */
293static inline int l2cap_check_security(struct sock *sk) 299static inline int l2cap_check_security(struct l2cap_chan *chan)
294{ 300{
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 301 struct l2cap_conn *conn = chan->conn;
296 __u8 auth_type; 302 __u8 auth_type;
297 303
298 auth_type = l2cap_get_auth_type(sk); 304 auth_type = l2cap_get_auth_type(chan);
299 305
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, 306 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
301 auth_type);
302} 307}
303 308
304u8 l2cap_get_ident(struct l2cap_conn *conn) 309u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -341,11 +346,12 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
341 hci_send_acl(conn->hcon, skb, flags); 346 hci_send_acl(conn->hcon, skb, flags);
342} 347}
343 348
344static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 349static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
345{ 350{
346 struct sk_buff *skb; 351 struct sk_buff *skb;
347 struct l2cap_hdr *lh; 352 struct l2cap_hdr *lh;
348 struct l2cap_conn *conn = pi->conn; 353 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
354 struct l2cap_conn *conn = chan->conn;
349 struct sock *sk = (struct sock *)pi; 355 struct sock *sk = (struct sock *)pi;
350 int count, hlen = L2CAP_HDR_SIZE + 2; 356 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags; 357 u8 flags;
@@ -353,22 +359,22 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
353 if (sk->sk_state != BT_CONNECTED) 359 if (sk->sk_state != BT_CONNECTED)
354 return; 360 return;
355 361
356 if (pi->fcs == L2CAP_FCS_CRC16) 362 if (chan->fcs == L2CAP_FCS_CRC16)
357 hlen += 2; 363 hlen += 2;
358 364
359 BT_DBG("pi %p, control 0x%2.2x", pi, control); 365 BT_DBG("chan %p, control 0x%2.2x", chan, control);
360 366
361 count = min_t(unsigned int, conn->mtu, hlen); 367 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE; 368 control |= L2CAP_CTRL_FRAME_TYPE;
363 369
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 370 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL; 371 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 372 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
367 } 373 }
368 374
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { 375 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL; 376 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT; 377 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
372 } 378 }
373 379
374 skb = bt_skb_alloc(count, GFP_ATOMIC); 380 skb = bt_skb_alloc(count, GFP_ATOMIC);
@@ -377,10 +383,10 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
377 383
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 384 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 385 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid); 386 lh->cid = cpu_to_le16(chan->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2)); 387 put_unaligned_le16(control, skb_put(skb, 2));
382 388
383 if (pi->fcs == L2CAP_FCS_CRC16) { 389 if (chan->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2); 390 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2)); 391 put_unaligned_le16(fcs, skb_put(skb, 2));
386 } 392 }
@@ -390,45 +396,46 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
390 else 396 else
391 flags = ACL_START; 397 flags = ACL_START;
392 398
393 hci_send_acl(pi->conn->hcon, skb, flags); 399 hci_send_acl(chan->conn->hcon, skb, flags);
394} 400}
395 401
396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 402static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
397{ 403{
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 404 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY; 405 control |= L2CAP_SUPER_RCV_NOT_READY;
400 pi->conn_state |= L2CAP_CONN_RNR_SENT; 406 chan->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else 407 } else
402 control |= L2CAP_SUPER_RCV_READY; 408 control |= L2CAP_SUPER_RCV_READY;
403 409
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 410 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
405 411
406 l2cap_send_sframe(pi, control); 412 l2cap_send_sframe(chan, control);
407} 413}
408 414
409static inline int __l2cap_no_conn_pending(struct sock *sk) 415static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
410{ 416{
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND); 417 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
412} 418}
413 419
414static void l2cap_do_start(struct sock *sk) 420static void l2cap_do_start(struct l2cap_chan *chan)
415{ 421{
416 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 422 struct l2cap_conn *conn = chan->conn;
417 423
418 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { 424 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
419 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 425 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
420 return; 426 return;
421 427
422 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) { 428 if (l2cap_check_security(chan) &&
429 __l2cap_no_conn_pending(chan)) {
423 struct l2cap_conn_req req; 430 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 431 req.scid = cpu_to_le16(chan->scid);
425 req.psm = l2cap_pi(sk)->psm; 432 req.psm = chan->psm;
426 433
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 434 chan->ident = l2cap_get_ident(conn);
428 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 435 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
429 436
430 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 437 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
431 L2CAP_CONN_REQ, sizeof(req), &req); 438 sizeof(req), &req);
432 } 439 }
433 } else { 440 } else {
434 struct l2cap_info_req req; 441 struct l2cap_info_req req;
@@ -461,23 +468,24 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
461 } 468 }
462} 469}
463 470
464void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) 471void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
465{ 472{
473 struct sock *sk;
466 struct l2cap_disconn_req req; 474 struct l2cap_disconn_req req;
467 475
468 if (!conn) 476 if (!conn)
469 return; 477 return;
470 478
471 skb_queue_purge(TX_QUEUE(sk)); 479 sk = chan->sk;
472 480
473 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { 481 if (chan->mode == L2CAP_MODE_ERTM) {
474 del_timer(&l2cap_pi(sk)->retrans_timer); 482 del_timer(&chan->retrans_timer);
475 del_timer(&l2cap_pi(sk)->monitor_timer); 483 del_timer(&chan->monitor_timer);
476 del_timer(&l2cap_pi(sk)->ack_timer); 484 del_timer(&chan->ack_timer);
477 } 485 }
478 486
479 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 487 req.dcid = cpu_to_le16(chan->dcid);
480 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 488 req.scid = cpu_to_le16(chan->scid);
481 l2cap_send_cmd(conn, l2cap_get_ident(conn), 489 l2cap_send_cmd(conn, l2cap_get_ident(conn),
482 L2CAP_DISCONN_REQ, sizeof(req), &req); 490 L2CAP_DISCONN_REQ, sizeof(req), &req);
483 491
@@ -488,17 +496,15 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
488/* ---- L2CAP connections ---- */ 496/* ---- L2CAP connections ---- */
489static void l2cap_conn_start(struct l2cap_conn *conn) 497static void l2cap_conn_start(struct l2cap_conn *conn)
490{ 498{
491 struct l2cap_chan_list *l = &conn->chan_list; 499 struct l2cap_chan *chan, *tmp;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
494 500
495 BT_DBG("conn %p", conn); 501 BT_DBG("conn %p", conn);
496 502
497 INIT_LIST_HEAD(&del.list); 503 read_lock(&conn->chan_lock);
498 504
499 read_lock(&l->lock); 505 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
506 struct sock *sk = chan->sk;
500 507
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk); 508 bh_lock_sock(sk);
503 509
504 if (sk->sk_type != SOCK_SEQPACKET && 510 if (sk->sk_type != SOCK_SEQPACKET &&
@@ -510,40 +516,41 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
510 if (sk->sk_state == BT_CONNECT) { 516 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req; 517 struct l2cap_conn_req req;
512 518
513 if (!l2cap_check_security(sk) || 519 if (!l2cap_check_security(chan) ||
514 !__l2cap_no_conn_pending(sk)) { 520 !__l2cap_no_conn_pending(chan)) {
515 bh_unlock_sock(sk); 521 bh_unlock_sock(sk);
516 continue; 522 continue;
517 } 523 }
518 524
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode, 525 if (!l2cap_mode_supported(chan->mode,
520 conn->feat_mask) 526 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state & 527 && chan->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) { 528 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list), 529 /* __l2cap_sock_close() calls list_del(chan)
524 GFP_ATOMIC); 530 * so release the lock */
525 tmp1->sk = sk; 531 read_unlock_bh(&conn->chan_lock);
526 list_add_tail(&tmp1->list, &del.list); 532 __l2cap_sock_close(sk, ECONNRESET);
533 read_lock_bh(&conn->chan_lock);
527 bh_unlock_sock(sk); 534 bh_unlock_sock(sk);
528 continue; 535 continue;
529 } 536 }
530 537
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 538 req.scid = cpu_to_le16(chan->scid);
532 req.psm = l2cap_pi(sk)->psm; 539 req.psm = chan->psm;
533 540
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 541 chan->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 542 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
536 543
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 544 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
538 L2CAP_CONN_REQ, sizeof(req), &req); 545 sizeof(req), &req);
539 546
540 } else if (sk->sk_state == BT_CONNECT2) { 547 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp; 548 struct l2cap_conn_rsp rsp;
542 char buf[128]; 549 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 550 rsp.scid = cpu_to_le16(chan->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 551 rsp.dcid = cpu_to_le16(chan->scid);
545 552
546 if (l2cap_check_security(sk)) { 553 if (l2cap_check_security(chan)) {
547 if (bt_sk(sk)->defer_setup) { 554 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent; 555 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 556 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -560,33 +567,25 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 567 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 } 568 }
562 569
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 570 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 571 sizeof(rsp), &rsp);
565 572
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT || 573 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) { 574 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk); 575 bh_unlock_sock(sk);
569 continue; 576 continue;
570 } 577 }
571 578
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; 579 chan->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 580 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf); 581 l2cap_build_conf_req(chan, buf), buf);
575 l2cap_pi(sk)->num_conf_req++; 582 chan->num_conf_req++;
576 } 583 }
577 584
578 bh_unlock_sock(sk); 585 bh_unlock_sock(sk);
579 } 586 }
580 587
581 read_unlock(&l->lock); 588 read_unlock(&conn->chan_lock);
582
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
589 }
590} 589}
591 590
592/* Find socket with cid and source bdaddr. 591/* Find socket with cid and source bdaddr.
@@ -594,16 +593,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
594 */ 593 */
595static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src) 594static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
596{ 595{
597 struct sock *s, *sk = NULL, *sk1 = NULL; 596 struct sock *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node; 597 struct hlist_node *node;
599 598
600 read_lock(&l2cap_sk_list.lock); 599 read_lock(&l2cap_sk_list.lock);
601 600
602 sk_for_each(sk, node, &l2cap_sk_list.head) { 601 sk_for_each(sk, node, &l2cap_sk_list.head) {
602 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
603
603 if (state && sk->sk_state != state) 604 if (state && sk->sk_state != state)
604 continue; 605 continue;
605 606
606 if (l2cap_pi(sk)->scid == cid) { 607 if (chan->scid == cid) {
607 /* Exact match. */ 608 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src)) 609 if (!bacmp(&bt_sk(sk)->src, src))
609 break; 610 break;
@@ -613,18 +614,16 @@ static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
613 sk1 = sk; 614 sk1 = sk;
614 } 615 }
615 } 616 }
616 s = node ? sk : sk1; 617
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock); 618 read_unlock(&l2cap_sk_list.lock);
620 619
621 return s; 620 return node ? sk : sk1;
622} 621}
623 622
624static void l2cap_le_conn_ready(struct l2cap_conn *conn) 623static void l2cap_le_conn_ready(struct l2cap_conn *conn)
625{ 624{
626 struct l2cap_chan_list *list = &conn->chan_list; 625 struct sock *parent, *sk;
627 struct sock *parent, *uninitialized_var(sk); 626 struct l2cap_chan *chan;
628 627
629 BT_DBG(""); 628 BT_DBG("");
630 629
@@ -634,6 +633,8 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
634 if (!parent) 633 if (!parent)
635 return; 634 return;
636 635
636 bh_lock_sock(parent);
637
637 /* Check for backlog size */ 638 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) { 639 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog); 640 BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -644,22 +645,33 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
644 if (!sk) 645 if (!sk)
645 goto clean; 646 goto clean;
646 647
647 write_lock_bh(&list->lock); 648 chan = l2cap_chan_alloc(sk);
649 if (!chan) {
650 l2cap_sock_kill(sk);
651 goto clean;
652 }
653
654 l2cap_pi(sk)->chan = chan;
655
656 write_lock_bh(&conn->chan_lock);
648 657
649 hci_conn_hold(conn->hcon); 658 hci_conn_hold(conn->hcon);
650 659
651 l2cap_sock_init(sk, parent); 660 l2cap_sock_init(sk, parent);
661
652 bacpy(&bt_sk(sk)->src, conn->src); 662 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst); 663 bacpy(&bt_sk(sk)->dst, conn->dst);
654 664
655 __l2cap_chan_add(conn, sk, parent); 665 bt_accept_enqueue(parent, sk);
666
667 __l2cap_chan_add(conn, chan);
656 668
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 669 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 670
659 sk->sk_state = BT_CONNECTED; 671 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0); 672 parent->sk_data_ready(parent, 0);
661 673
662 write_unlock_bh(&list->lock); 674 write_unlock_bh(&conn->chan_lock);
663 675
664clean: 676clean:
665 bh_unlock_sock(parent); 677 bh_unlock_sock(parent);
@@ -667,17 +679,18 @@ clean:
667 679
668static void l2cap_conn_ready(struct l2cap_conn *conn) 680static void l2cap_conn_ready(struct l2cap_conn *conn)
669{ 681{
670 struct l2cap_chan_list *l = &conn->chan_list; 682 struct l2cap_chan *chan;
671 struct sock *sk;
672 683
673 BT_DBG("conn %p", conn); 684 BT_DBG("conn %p", conn);
674 685
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK) 686 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn); 687 l2cap_le_conn_ready(conn);
677 688
678 read_lock(&l->lock); 689 read_lock(&conn->chan_lock);
690
691 list_for_each_entry(chan, &conn->chan_l, list) {
692 struct sock *sk = chan->sk;
679 693
680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
681 bh_lock_sock(sk); 694 bh_lock_sock(sk);
682 695
683 if (conn->hcon->type == LE_LINK) { 696 if (conn->hcon->type == LE_LINK) {
@@ -692,30 +705,31 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
692 sk->sk_state = BT_CONNECTED; 705 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk); 706 sk->sk_state_change(sk);
694 } else if (sk->sk_state == BT_CONNECT) 707 } else if (sk->sk_state == BT_CONNECT)
695 l2cap_do_start(sk); 708 l2cap_do_start(chan);
696 709
697 bh_unlock_sock(sk); 710 bh_unlock_sock(sk);
698 } 711 }
699 712
700 read_unlock(&l->lock); 713 read_unlock(&conn->chan_lock);
701} 714}
702 715
703/* Notify sockets that we cannot guaranty reliability anymore */ 716/* Notify sockets that we cannot guaranty reliability anymore */
704static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) 717static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705{ 718{
706 struct l2cap_chan_list *l = &conn->chan_list; 719 struct l2cap_chan *chan;
707 struct sock *sk;
708 720
709 BT_DBG("conn %p", conn); 721 BT_DBG("conn %p", conn);
710 722
711 read_lock(&l->lock); 723 read_lock(&conn->chan_lock);
724
725 list_for_each_entry(chan, &conn->chan_l, list) {
726 struct sock *sk = chan->sk;
712 727
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 728 if (chan->force_reliable)
714 if (l2cap_pi(sk)->force_reliable)
715 sk->sk_err = err; 729 sk->sk_err = err;
716 } 730 }
717 731
718 read_unlock(&l->lock); 732 read_unlock(&conn->chan_lock);
719} 733}
720 734
721static void l2cap_info_timeout(unsigned long arg) 735static void l2cap_info_timeout(unsigned long arg)
@@ -755,7 +769,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
755 conn->feat_mask = 0; 769 conn->feat_mask = 0;
756 770
757 spin_lock_init(&conn->lock); 771 spin_lock_init(&conn->lock);
758 rwlock_init(&conn->chan_list.lock); 772 rwlock_init(&conn->chan_lock);
773
774 INIT_LIST_HEAD(&conn->chan_l);
759 775
760 if (hcon->type != LE_LINK) 776 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout, 777 setup_timer(&conn->info_timer, l2cap_info_timeout,
@@ -769,6 +785,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
769static void l2cap_conn_del(struct hci_conn *hcon, int err) 785static void l2cap_conn_del(struct hci_conn *hcon, int err)
770{ 786{
771 struct l2cap_conn *conn = hcon->l2cap_data; 787 struct l2cap_conn *conn = hcon->l2cap_data;
788 struct l2cap_chan *chan, *l;
772 struct sock *sk; 789 struct sock *sk;
773 790
774 if (!conn) 791 if (!conn)
@@ -779,9 +796,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
779 kfree_skb(conn->rx_skb); 796 kfree_skb(conn->rx_skb);
780 797
781 /* Kill channels */ 798 /* Kill channels */
782 while ((sk = conn->chan_list.head)) { 799 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
800 sk = chan->sk;
783 bh_lock_sock(sk); 801 bh_lock_sock(sk);
784 l2cap_chan_del(sk, err); 802 l2cap_chan_del(chan, err);
785 bh_unlock_sock(sk); 803 bh_unlock_sock(sk);
786 l2cap_sock_kill(sk); 804 l2cap_sock_kill(sk);
787 } 805 }
@@ -793,12 +811,11 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
793 kfree(conn); 811 kfree(conn);
794} 812}
795 813
796static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) 814static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
797{ 815{
798 struct l2cap_chan_list *l = &conn->chan_list; 816 write_lock_bh(&conn->chan_lock);
799 write_lock_bh(&l->lock); 817 __l2cap_chan_add(conn, chan);
800 __l2cap_chan_add(conn, sk, parent); 818 write_unlock_bh(&conn->chan_lock);
801 write_unlock_bh(&l->lock);
802} 819}
803 820
804/* ---- Socket interface ---- */ 821/* ---- Socket interface ---- */
@@ -814,10 +831,12 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
814 read_lock(&l2cap_sk_list.lock); 831 read_lock(&l2cap_sk_list.lock);
815 832
816 sk_for_each(sk, node, &l2cap_sk_list.head) { 833 sk_for_each(sk, node, &l2cap_sk_list.head) {
834 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
835
817 if (state && sk->sk_state != state) 836 if (state && sk->sk_state != state)
818 continue; 837 continue;
819 838
820 if (l2cap_pi(sk)->psm == psm) { 839 if (chan->psm == psm) {
821 /* Exact match. */ 840 /* Exact match. */
822 if (!bacmp(&bt_sk(sk)->src, src)) 841 if (!bacmp(&bt_sk(sk)->src, src))
823 break; 842 break;
@@ -833,8 +852,9 @@ static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
833 return node ? sk : sk1; 852 return node ? sk : sk1;
834} 853}
835 854
836int l2cap_do_connect(struct sock *sk) 855int l2cap_chan_connect(struct l2cap_chan *chan)
837{ 856{
857 struct sock *sk = chan->sk;
838 bdaddr_t *src = &bt_sk(sk)->src; 858 bdaddr_t *src = &bt_sk(sk)->src;
839 bdaddr_t *dst = &bt_sk(sk)->dst; 859 bdaddr_t *dst = &bt_sk(sk)->dst;
840 struct l2cap_conn *conn; 860 struct l2cap_conn *conn;
@@ -844,7 +864,7 @@ int l2cap_do_connect(struct sock *sk)
844 int err; 864 int err;
845 865
846 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), 866 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
847 l2cap_pi(sk)->psm); 867 chan->psm);
848 868
849 hdev = hci_get_route(dst, src); 869 hdev = hci_get_route(dst, src);
850 if (!hdev) 870 if (!hdev)
@@ -852,14 +872,14 @@ int l2cap_do_connect(struct sock *sk)
852 872
853 hci_dev_lock_bh(hdev); 873 hci_dev_lock_bh(hdev);
854 874
855 auth_type = l2cap_get_auth_type(sk); 875 auth_type = l2cap_get_auth_type(chan);
856 876
857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA) 877 if (chan->dcid == L2CAP_CID_LE_DATA)
858 hcon = hci_connect(hdev, LE_LINK, dst, 878 hcon = hci_connect(hdev, LE_LINK, dst,
859 l2cap_pi(sk)->sec_level, auth_type); 879 chan->sec_level, auth_type);
860 else 880 else
861 hcon = hci_connect(hdev, ACL_LINK, dst, 881 hcon = hci_connect(hdev, ACL_LINK, dst,
862 l2cap_pi(sk)->sec_level, auth_type); 882 chan->sec_level, auth_type);
863 883
864 if (IS_ERR(hcon)) { 884 if (IS_ERR(hcon)) {
865 err = PTR_ERR(hcon); 885 err = PTR_ERR(hcon);
@@ -876,7 +896,7 @@ int l2cap_do_connect(struct sock *sk)
876 /* Update source addr of the socket */ 896 /* Update source addr of the socket */
877 bacpy(src, conn->src); 897 bacpy(src, conn->src);
878 898
879 l2cap_chan_add(conn, sk, NULL); 899 l2cap_chan_add(conn, chan);
880 900
881 sk->sk_state = BT_CONNECT; 901 sk->sk_state = BT_CONNECT;
882 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 902 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
@@ -885,10 +905,10 @@ int l2cap_do_connect(struct sock *sk)
885 if (sk->sk_type != SOCK_SEQPACKET && 905 if (sk->sk_type != SOCK_SEQPACKET &&
886 sk->sk_type != SOCK_STREAM) { 906 sk->sk_type != SOCK_STREAM) {
887 l2cap_sock_clear_timer(sk); 907 l2cap_sock_clear_timer(sk);
888 if (l2cap_check_security(sk)) 908 if (l2cap_check_security(chan))
889 sk->sk_state = BT_CONNECTED; 909 sk->sk_state = BT_CONNECTED;
890 } else 910 } else
891 l2cap_do_start(sk); 911 l2cap_do_start(chan);
892 } 912 }
893 913
894 err = 0; 914 err = 0;
@@ -901,12 +921,13 @@ done:
901 921
902int __l2cap_wait_ack(struct sock *sk) 922int __l2cap_wait_ack(struct sock *sk)
903{ 923{
924 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
904 DECLARE_WAITQUEUE(wait, current); 925 DECLARE_WAITQUEUE(wait, current);
905 int err = 0; 926 int err = 0;
906 int timeo = HZ/5; 927 int timeo = HZ/5;
907 928
908 add_wait_queue(sk_sleep(sk), &wait); 929 add_wait_queue(sk_sleep(sk), &wait);
909 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) { 930 while ((chan->unacked_frames > 0 && chan->conn)) {
910 set_current_state(TASK_INTERRUPTIBLE); 931 set_current_state(TASK_INTERRUPTIBLE);
911 932
912 if (!timeo) 933 if (!timeo)
@@ -932,68 +953,69 @@ int __l2cap_wait_ack(struct sock *sk)
932 953
933static void l2cap_monitor_timeout(unsigned long arg) 954static void l2cap_monitor_timeout(unsigned long arg)
934{ 955{
935 struct sock *sk = (void *) arg; 956 struct l2cap_chan *chan = (void *) arg;
957 struct sock *sk = chan->sk;
936 958
937 BT_DBG("sk %p", sk); 959 BT_DBG("chan %p", chan);
938 960
939 bh_lock_sock(sk); 961 bh_lock_sock(sk);
940 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 962 if (chan->retry_count >= chan->remote_max_tx) {
941 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED); 963 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
942 bh_unlock_sock(sk); 964 bh_unlock_sock(sk);
943 return; 965 return;
944 } 966 }
945 967
946 l2cap_pi(sk)->retry_count++; 968 chan->retry_count++;
947 __mod_monitor_timer(); 969 __mod_monitor_timer();
948 970
949 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); 971 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
950 bh_unlock_sock(sk); 972 bh_unlock_sock(sk);
951} 973}
952 974
953static void l2cap_retrans_timeout(unsigned long arg) 975static void l2cap_retrans_timeout(unsigned long arg)
954{ 976{
955 struct sock *sk = (void *) arg; 977 struct l2cap_chan *chan = (void *) arg;
978 struct sock *sk = chan->sk;
956 979
957 BT_DBG("sk %p", sk); 980 BT_DBG("chan %p", chan);
958 981
959 bh_lock_sock(sk); 982 bh_lock_sock(sk);
960 l2cap_pi(sk)->retry_count = 1; 983 chan->retry_count = 1;
961 __mod_monitor_timer(); 984 __mod_monitor_timer();
962 985
963 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; 986 chan->conn_state |= L2CAP_CONN_WAIT_F;
964 987
965 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); 988 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
966 bh_unlock_sock(sk); 989 bh_unlock_sock(sk);
967} 990}
968 991
969static void l2cap_drop_acked_frames(struct sock *sk) 992static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
970{ 993{
971 struct sk_buff *skb; 994 struct sk_buff *skb;
972 995
973 while ((skb = skb_peek(TX_QUEUE(sk))) && 996 while ((skb = skb_peek(&chan->tx_q)) &&
974 l2cap_pi(sk)->unacked_frames) { 997 chan->unacked_frames) {
975 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) 998 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
976 break; 999 break;
977 1000
978 skb = skb_dequeue(TX_QUEUE(sk)); 1001 skb = skb_dequeue(&chan->tx_q);
979 kfree_skb(skb); 1002 kfree_skb(skb);
980 1003
981 l2cap_pi(sk)->unacked_frames--; 1004 chan->unacked_frames--;
982 } 1005 }
983 1006
984 if (!l2cap_pi(sk)->unacked_frames) 1007 if (!chan->unacked_frames)
985 del_timer(&l2cap_pi(sk)->retrans_timer); 1008 del_timer(&chan->retrans_timer);
986} 1009}
987 1010
988void l2cap_do_send(struct sock *sk, struct sk_buff *skb) 1011void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
989{ 1012{
990 struct l2cap_pinfo *pi = l2cap_pi(sk); 1013 struct hci_conn *hcon = chan->conn->hcon;
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags; 1014 u16 flags;
993 1015
994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 1016 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
995 1017
996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev)) 1018 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH; 1019 flags = ACL_START_NO_FLUSH;
998 else 1020 else
999 flags = ACL_START; 1021 flags = ACL_START;
@@ -1001,35 +1023,33 @@ void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1001 hci_send_acl(hcon, skb, flags); 1023 hci_send_acl(hcon, skb, flags);
1002} 1024}
1003 1025
1004void l2cap_streaming_send(struct sock *sk) 1026void l2cap_streaming_send(struct l2cap_chan *chan)
1005{ 1027{
1006 struct sk_buff *skb; 1028 struct sk_buff *skb;
1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
1008 u16 control, fcs; 1029 u16 control, fcs;
1009 1030
1010 while ((skb = skb_dequeue(TX_QUEUE(sk)))) { 1031 while ((skb = skb_dequeue(&chan->tx_q))) {
1011 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); 1032 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1012 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1033 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1013 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); 1034 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1014 1035
1015 if (pi->fcs == L2CAP_FCS_CRC16) { 1036 if (chan->fcs == L2CAP_FCS_CRC16) {
1016 fcs = crc16(0, (u8 *)skb->data, skb->len - 2); 1037 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1017 put_unaligned_le16(fcs, skb->data + skb->len - 2); 1038 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1018 } 1039 }
1019 1040
1020 l2cap_do_send(sk, skb); 1041 l2cap_do_send(chan, skb);
1021 1042
1022 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1043 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1023 } 1044 }
1024} 1045}
1025 1046
1026static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) 1047static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1027{ 1048{
1028 struct l2cap_pinfo *pi = l2cap_pi(sk);
1029 struct sk_buff *skb, *tx_skb; 1049 struct sk_buff *skb, *tx_skb;
1030 u16 control, fcs; 1050 u16 control, fcs;
1031 1051
1032 skb = skb_peek(TX_QUEUE(sk)); 1052 skb = skb_peek(&chan->tx_q);
1033 if (!skb) 1053 if (!skb)
1034 return; 1054 return;
1035 1055
@@ -1037,14 +1057,14 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1037 if (bt_cb(skb)->tx_seq == tx_seq) 1057 if (bt_cb(skb)->tx_seq == tx_seq)
1038 break; 1058 break;
1039 1059
1040 if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1060 if (skb_queue_is_last(&chan->tx_q, skb))
1041 return; 1061 return;
1042 1062
1043 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb))); 1063 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1044 1064
1045 if (pi->remote_max_tx && 1065 if (chan->remote_max_tx &&
1046 bt_cb(skb)->retries == pi->remote_max_tx) { 1066 bt_cb(skb)->retries == chan->remote_max_tx) {
1047 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); 1067 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1048 return; 1068 return;
1049 } 1069 }
1050 1070
@@ -1053,39 +1073,39 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1053 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1073 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 control &= L2CAP_CTRL_SAR; 1074 control &= L2CAP_CTRL_SAR;
1055 1075
1056 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1076 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1057 control |= L2CAP_CTRL_FINAL; 1077 control |= L2CAP_CTRL_FINAL;
1058 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1078 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1059 } 1079 }
1060 1080
1061 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1081 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1062 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1082 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1063 1083
1064 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1084 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1065 1085
1066 if (pi->fcs == L2CAP_FCS_CRC16) { 1086 if (chan->fcs == L2CAP_FCS_CRC16) {
1067 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1087 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1068 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1088 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1069 } 1089 }
1070 1090
1071 l2cap_do_send(sk, tx_skb); 1091 l2cap_do_send(chan, tx_skb);
1072} 1092}
1073 1093
1074int l2cap_ertm_send(struct sock *sk) 1094int l2cap_ertm_send(struct l2cap_chan *chan)
1075{ 1095{
1076 struct sk_buff *skb, *tx_skb; 1096 struct sk_buff *skb, *tx_skb;
1077 struct l2cap_pinfo *pi = l2cap_pi(sk); 1097 struct sock *sk = chan->sk;
1078 u16 control, fcs; 1098 u16 control, fcs;
1079 int nsent = 0; 1099 int nsent = 0;
1080 1100
1081 if (sk->sk_state != BT_CONNECTED) 1101 if (sk->sk_state != BT_CONNECTED)
1082 return -ENOTCONN; 1102 return -ENOTCONN;
1083 1103
1084 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) { 1104 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1085 1105
1086 if (pi->remote_max_tx && 1106 if (chan->remote_max_tx &&
1087 bt_cb(skb)->retries == pi->remote_max_tx) { 1107 bt_cb(skb)->retries == chan->remote_max_tx) {
1088 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); 1108 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1089 break; 1109 break;
1090 } 1110 }
1091 1111
@@ -1096,36 +1116,36 @@ int l2cap_ertm_send(struct sock *sk)
1096 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1116 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1097 control &= L2CAP_CTRL_SAR; 1117 control &= L2CAP_CTRL_SAR;
1098 1118
1099 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1119 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1100 control |= L2CAP_CTRL_FINAL; 1120 control |= L2CAP_CTRL_FINAL;
1101 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1121 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1102 } 1122 }
1103 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1123 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1104 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1124 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1105 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1125 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1106 1126
1107 1127
1108 if (pi->fcs == L2CAP_FCS_CRC16) { 1128 if (chan->fcs == L2CAP_FCS_CRC16) {
1109 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); 1129 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1110 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); 1130 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 } 1131 }
1112 1132
1113 l2cap_do_send(sk, tx_skb); 1133 l2cap_do_send(chan, tx_skb);
1114 1134
1115 __mod_retrans_timer(); 1135 __mod_retrans_timer();
1116 1136
1117 bt_cb(skb)->tx_seq = pi->next_tx_seq; 1137 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1118 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1138 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1119 1139
1120 if (bt_cb(skb)->retries == 1) 1140 if (bt_cb(skb)->retries == 1)
1121 pi->unacked_frames++; 1141 chan->unacked_frames++;
1122 1142
1123 pi->frames_sent++; 1143 chan->frames_sent++;
1124 1144
1125 if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1145 if (skb_queue_is_last(&chan->tx_q, skb))
1126 sk->sk_send_head = NULL; 1146 chan->tx_send_head = NULL;
1127 else 1147 else
1128 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); 1148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1129 1149
1130 nsent++; 1150 nsent++;
1131 } 1151 }
@@ -1133,41 +1153,39 @@ int l2cap_ertm_send(struct sock *sk)
1133 return nsent; 1153 return nsent;
1134} 1154}
1135 1155
1136static int l2cap_retransmit_frames(struct sock *sk) 1156static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1137{ 1157{
1138 struct l2cap_pinfo *pi = l2cap_pi(sk);
1139 int ret; 1158 int ret;
1140 1159
1141 if (!skb_queue_empty(TX_QUEUE(sk))) 1160 if (!skb_queue_empty(&chan->tx_q))
1142 sk->sk_send_head = TX_QUEUE(sk)->next; 1161 chan->tx_send_head = chan->tx_q.next;
1143 1162
1144 pi->next_tx_seq = pi->expected_ack_seq; 1163 chan->next_tx_seq = chan->expected_ack_seq;
1145 ret = l2cap_ertm_send(sk); 1164 ret = l2cap_ertm_send(chan);
1146 return ret; 1165 return ret;
1147} 1166}
1148 1167
1149static void l2cap_send_ack(struct l2cap_pinfo *pi) 1168static void l2cap_send_ack(struct l2cap_chan *chan)
1150{ 1169{
1151 struct sock *sk = (struct sock *)pi;
1152 u16 control = 0; 1170 u16 control = 0;
1153 1171
1154 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1172 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1155 1173
1156 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 1174 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1157 control |= L2CAP_SUPER_RCV_NOT_READY; 1175 control |= L2CAP_SUPER_RCV_NOT_READY;
1158 pi->conn_state |= L2CAP_CONN_RNR_SENT; 1176 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1159 l2cap_send_sframe(pi, control); 1177 l2cap_send_sframe(chan, control);
1160 return; 1178 return;
1161 } 1179 }
1162 1180
1163 if (l2cap_ertm_send(sk) > 0) 1181 if (l2cap_ertm_send(chan) > 0)
1164 return; 1182 return;
1165 1183
1166 control |= L2CAP_SUPER_RCV_READY; 1184 control |= L2CAP_SUPER_RCV_READY;
1167 l2cap_send_sframe(pi, control); 1185 l2cap_send_sframe(chan, control);
1168} 1186}
1169 1187
1170static void l2cap_send_srejtail(struct sock *sk) 1188static void l2cap_send_srejtail(struct l2cap_chan *chan)
1171{ 1189{
1172 struct srej_list *tail; 1190 struct srej_list *tail;
1173 u16 control; 1191 u16 control;
@@ -1175,15 +1193,15 @@ static void l2cap_send_srejtail(struct sock *sk)
1175 control = L2CAP_SUPER_SELECT_REJECT; 1193 control = L2CAP_SUPER_SELECT_REJECT;
1176 control |= L2CAP_CTRL_FINAL; 1194 control |= L2CAP_CTRL_FINAL;
1177 1195
1178 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list); 1196 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1179 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1197 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1180 1198
1181 l2cap_send_sframe(l2cap_pi(sk), control); 1199 l2cap_send_sframe(chan, control);
1182} 1200}
1183 1201
1184static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1202static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1185{ 1203{
1186 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1204 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1187 struct sk_buff **frag; 1205 struct sk_buff **frag;
1188 int err, sent = 0; 1206 int err, sent = 0;
1189 1207
@@ -1213,9 +1231,10 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1213 return sent; 1231 return sent;
1214} 1232}
1215 1233
1216struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1234struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1217{ 1235{
1218 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1236 struct sock *sk = chan->sk;
1237 struct l2cap_conn *conn = chan->conn;
1219 struct sk_buff *skb; 1238 struct sk_buff *skb;
1220 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1239 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1221 struct l2cap_hdr *lh; 1240 struct l2cap_hdr *lh;
@@ -1230,9 +1249,9 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
1230 1249
1231 /* Create L2CAP header */ 1250 /* Create L2CAP header */
1232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1251 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1233 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1252 lh->cid = cpu_to_le16(chan->dcid);
1234 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1253 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1235 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); 1254 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1236 1255
1237 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1256 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1238 if (unlikely(err < 0)) { 1257 if (unlikely(err < 0)) {
@@ -1242,9 +1261,10 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
1242 return skb; 1261 return skb;
1243} 1262}
1244 1263
1245struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1264struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1246{ 1265{
1247 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1266 struct sock *sk = chan->sk;
1267 struct l2cap_conn *conn = chan->conn;
1248 struct sk_buff *skb; 1268 struct sk_buff *skb;
1249 int err, count, hlen = L2CAP_HDR_SIZE; 1269 int err, count, hlen = L2CAP_HDR_SIZE;
1250 struct l2cap_hdr *lh; 1270 struct l2cap_hdr *lh;
@@ -1259,7 +1279,7 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
1259 1279
1260 /* Create L2CAP header */ 1280 /* Create L2CAP header */
1261 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1281 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1262 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1282 lh->cid = cpu_to_le16(chan->dcid);
1263 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1283 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1264 1284
1265 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1285 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
@@ -1270,9 +1290,10 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
1270 return skb; 1290 return skb;
1271} 1291}
1272 1292
1273struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1293struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1274{ 1294{
1275 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1295 struct sock *sk = chan->sk;
1296 struct l2cap_conn *conn = chan->conn;
1276 struct sk_buff *skb; 1297 struct sk_buff *skb;
1277 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1298 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1278 struct l2cap_hdr *lh; 1299 struct l2cap_hdr *lh;
@@ -1285,7 +1306,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1285 if (sdulen) 1306 if (sdulen)
1286 hlen += 2; 1307 hlen += 2;
1287 1308
1288 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1309 if (chan->fcs == L2CAP_FCS_CRC16)
1289 hlen += 2; 1310 hlen += 2;
1290 1311
1291 count = min_t(unsigned int, (conn->mtu - hlen), len); 1312 count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -1296,7 +1317,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1296 1317
1297 /* Create L2CAP header */ 1318 /* Create L2CAP header */
1298 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1319 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1299 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1320 lh->cid = cpu_to_le16(chan->dcid);
1300 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1321 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1301 put_unaligned_le16(control, skb_put(skb, 2)); 1322 put_unaligned_le16(control, skb_put(skb, 2));
1302 if (sdulen) 1323 if (sdulen)
@@ -1308,16 +1329,15 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1308 return ERR_PTR(err); 1329 return ERR_PTR(err);
1309 } 1330 }
1310 1331
1311 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1332 if (chan->fcs == L2CAP_FCS_CRC16)
1312 put_unaligned_le16(0, skb_put(skb, 2)); 1333 put_unaligned_le16(0, skb_put(skb, 2));
1313 1334
1314 bt_cb(skb)->retries = 0; 1335 bt_cb(skb)->retries = 0;
1315 return skb; 1336 return skb;
1316} 1337}
1317 1338
1318int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1339int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1319{ 1340{
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb; 1341 struct sk_buff *skb;
1322 struct sk_buff_head sar_queue; 1342 struct sk_buff_head sar_queue;
1323 u16 control; 1343 u16 control;
@@ -1325,26 +1345,26 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1325 1345
1326 skb_queue_head_init(&sar_queue); 1346 skb_queue_head_init(&sar_queue);
1327 control = L2CAP_SDU_START; 1347 control = L2CAP_SDU_START;
1328 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len); 1348 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1329 if (IS_ERR(skb)) 1349 if (IS_ERR(skb))
1330 return PTR_ERR(skb); 1350 return PTR_ERR(skb);
1331 1351
1332 __skb_queue_tail(&sar_queue, skb); 1352 __skb_queue_tail(&sar_queue, skb);
1333 len -= pi->remote_mps; 1353 len -= chan->remote_mps;
1334 size += pi->remote_mps; 1354 size += chan->remote_mps;
1335 1355
1336 while (len > 0) { 1356 while (len > 0) {
1337 size_t buflen; 1357 size_t buflen;
1338 1358
1339 if (len > pi->remote_mps) { 1359 if (len > chan->remote_mps) {
1340 control = L2CAP_SDU_CONTINUE; 1360 control = L2CAP_SDU_CONTINUE;
1341 buflen = pi->remote_mps; 1361 buflen = chan->remote_mps;
1342 } else { 1362 } else {
1343 control = L2CAP_SDU_END; 1363 control = L2CAP_SDU_END;
1344 buflen = len; 1364 buflen = len;
1345 } 1365 }
1346 1366
1347 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0); 1367 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1348 if (IS_ERR(skb)) { 1368 if (IS_ERR(skb)) {
1349 skb_queue_purge(&sar_queue); 1369 skb_queue_purge(&sar_queue);
1350 return PTR_ERR(skb); 1370 return PTR_ERR(skb);
@@ -1354,9 +1374,9 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1354 len -= buflen; 1374 len -= buflen;
1355 size += buflen; 1375 size += buflen;
1356 } 1376 }
1357 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); 1377 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1358 if (sk->sk_send_head == NULL) 1378 if (chan->tx_send_head == NULL)
1359 sk->sk_send_head = sar_queue.next; 1379 chan->tx_send_head = sar_queue.next;
1360 1380
1361 return size; 1381 return size;
1362} 1382}
@@ -1364,10 +1384,11 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1364static void l2cap_chan_ready(struct sock *sk) 1384static void l2cap_chan_ready(struct sock *sk)
1365{ 1385{
1366 struct sock *parent = bt_sk(sk)->parent; 1386 struct sock *parent = bt_sk(sk)->parent;
1387 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1367 1388
1368 BT_DBG("sk %p, parent %p", sk, parent); 1389 BT_DBG("sk %p, parent %p", sk, parent);
1369 1390
1370 l2cap_pi(sk)->conf_state = 0; 1391 chan->conf_state = 0;
1371 l2cap_sock_clear_timer(sk); 1392 l2cap_sock_clear_timer(sk);
1372 1393
1373 if (!parent) { 1394 if (!parent) {
@@ -1387,14 +1408,14 @@ static void l2cap_chan_ready(struct sock *sk)
1387/* Copy frame to all raw sockets on that connection */ 1408/* Copy frame to all raw sockets on that connection */
1388static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 1409static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1389{ 1410{
1390 struct l2cap_chan_list *l = &conn->chan_list;
1391 struct sk_buff *nskb; 1411 struct sk_buff *nskb;
1392 struct sock *sk; 1412 struct l2cap_chan *chan;
1393 1413
1394 BT_DBG("conn %p", conn); 1414 BT_DBG("conn %p", conn);
1395 1415
1396 read_lock(&l->lock); 1416 read_lock(&conn->chan_lock);
1397 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 1417 list_for_each_entry(chan, &conn->chan_l, list) {
1418 struct sock *sk = chan->sk;
1398 if (sk->sk_type != SOCK_RAW) 1419 if (sk->sk_type != SOCK_RAW)
1399 continue; 1420 continue;
1400 1421
@@ -1408,7 +1429,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1408 if (sock_queue_rcv_skb(sk, nskb)) 1429 if (sock_queue_rcv_skb(sk, nskb))
1409 kfree_skb(nskb); 1430 kfree_skb(nskb);
1410 } 1431 }
1411 read_unlock(&l->lock); 1432 read_unlock(&conn->chan_lock);
1412} 1433}
1413 1434
1414/* ---- L2CAP signalling commands ---- */ 1435/* ---- L2CAP signalling commands ---- */
@@ -1540,32 +1561,35 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1540 1561
1541static void l2cap_ack_timeout(unsigned long arg) 1562static void l2cap_ack_timeout(unsigned long arg)
1542{ 1563{
1543 struct sock *sk = (void *) arg; 1564 struct l2cap_chan *chan = (void *) arg;
1544 1565
1545 bh_lock_sock(sk); 1566 bh_lock_sock(chan->sk);
1546 l2cap_send_ack(l2cap_pi(sk)); 1567 l2cap_send_ack(chan);
1547 bh_unlock_sock(sk); 1568 bh_unlock_sock(chan->sk);
1548} 1569}
1549 1570
1550static inline void l2cap_ertm_init(struct sock *sk) 1571static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1551{ 1572{
1552 l2cap_pi(sk)->expected_ack_seq = 0; 1573 struct sock *sk = chan->sk;
1553 l2cap_pi(sk)->unacked_frames = 0; 1574
1554 l2cap_pi(sk)->buffer_seq = 0; 1575 chan->expected_ack_seq = 0;
1555 l2cap_pi(sk)->num_acked = 0; 1576 chan->unacked_frames = 0;
1556 l2cap_pi(sk)->frames_sent = 0; 1577 chan->buffer_seq = 0;
1578 chan->num_acked = 0;
1579 chan->frames_sent = 0;
1580
1581 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1582 (unsigned long) chan);
1583 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1584 (unsigned long) chan);
1585 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1557 1586
1558 setup_timer(&l2cap_pi(sk)->retrans_timer, 1587 skb_queue_head_init(&chan->srej_q);
1559 l2cap_retrans_timeout, (unsigned long) sk); 1588 skb_queue_head_init(&chan->busy_q);
1560 setup_timer(&l2cap_pi(sk)->monitor_timer,
1561 l2cap_monitor_timeout, (unsigned long) sk);
1562 setup_timer(&l2cap_pi(sk)->ack_timer,
1563 l2cap_ack_timeout, (unsigned long) sk);
1564 1589
1565 __skb_queue_head_init(SREJ_QUEUE(sk)); 1590 INIT_LIST_HEAD(&chan->srej_l);
1566 __skb_queue_head_init(BUSY_QUEUE(sk));
1567 1591
1568 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work); 1592 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1569 1593
1570 sk->sk_backlog_rcv = l2cap_ertm_data_rcv; 1594 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1571} 1595}
@@ -1583,38 +1607,37 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1583 } 1607 }
1584} 1608}
1585 1609
1586int l2cap_build_conf_req(struct sock *sk, void *data) 1610static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1587{ 1611{
1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct l2cap_conf_req *req = data; 1612 struct l2cap_conf_req *req = data;
1590 struct l2cap_conf_rfc rfc = { .mode = pi->mode }; 1613 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1591 void *ptr = req->data; 1614 void *ptr = req->data;
1592 1615
1593 BT_DBG("sk %p", sk); 1616 BT_DBG("chan %p", chan);
1594 1617
1595 if (pi->num_conf_req || pi->num_conf_rsp) 1618 if (chan->num_conf_req || chan->num_conf_rsp)
1596 goto done; 1619 goto done;
1597 1620
1598 switch (pi->mode) { 1621 switch (chan->mode) {
1599 case L2CAP_MODE_STREAMING: 1622 case L2CAP_MODE_STREAMING:
1600 case L2CAP_MODE_ERTM: 1623 case L2CAP_MODE_ERTM:
1601 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE) 1624 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1602 break; 1625 break;
1603 1626
1604 /* fall through */ 1627 /* fall through */
1605 default: 1628 default:
1606 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 1629 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1607 break; 1630 break;
1608 } 1631 }
1609 1632
1610done: 1633done:
1611 if (pi->imtu != L2CAP_DEFAULT_MTU) 1634 if (chan->imtu != L2CAP_DEFAULT_MTU)
1612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 1635 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1613 1636
1614 switch (pi->mode) { 1637 switch (chan->mode) {
1615 case L2CAP_MODE_BASIC: 1638 case L2CAP_MODE_BASIC:
1616 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 1639 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1617 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 1640 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1618 break; 1641 break;
1619 1642
1620 rfc.mode = L2CAP_MODE_BASIC; 1643 rfc.mode = L2CAP_MODE_BASIC;
@@ -1630,24 +1653,24 @@ done:
1630 1653
1631 case L2CAP_MODE_ERTM: 1654 case L2CAP_MODE_ERTM:
1632 rfc.mode = L2CAP_MODE_ERTM; 1655 rfc.mode = L2CAP_MODE_ERTM;
1633 rfc.txwin_size = pi->tx_win; 1656 rfc.txwin_size = chan->tx_win;
1634 rfc.max_transmit = pi->max_tx; 1657 rfc.max_transmit = chan->max_tx;
1635 rfc.retrans_timeout = 0; 1658 rfc.retrans_timeout = 0;
1636 rfc.monitor_timeout = 0; 1659 rfc.monitor_timeout = 0;
1637 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1660 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1638 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 1661 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1639 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1662 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1640 1663
1641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 1664 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1642 (unsigned long) &rfc); 1665 (unsigned long) &rfc);
1643 1666
1644 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1667 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1645 break; 1668 break;
1646 1669
1647 if (pi->fcs == L2CAP_FCS_NONE || 1670 if (chan->fcs == L2CAP_FCS_NONE ||
1648 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1671 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1649 pi->fcs = L2CAP_FCS_NONE; 1672 chan->fcs = L2CAP_FCS_NONE;
1650 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1651 } 1674 }
1652 break; 1675 break;
1653 1676
@@ -1658,43 +1681,42 @@ done:
1658 rfc.retrans_timeout = 0; 1681 rfc.retrans_timeout = 0;
1659 rfc.monitor_timeout = 0; 1682 rfc.monitor_timeout = 0;
1660 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1683 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1661 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 1684 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1662 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1685 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1663 1686
1664 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 1687 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1665 (unsigned long) &rfc); 1688 (unsigned long) &rfc);
1666 1689
1667 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1690 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1668 break; 1691 break;
1669 1692
1670 if (pi->fcs == L2CAP_FCS_NONE || 1693 if (chan->fcs == L2CAP_FCS_NONE ||
1671 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1694 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1672 pi->fcs = L2CAP_FCS_NONE; 1695 chan->fcs = L2CAP_FCS_NONE;
1673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1696 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1674 } 1697 }
1675 break; 1698 break;
1676 } 1699 }
1677 1700
1678 req->dcid = cpu_to_le16(pi->dcid); 1701 req->dcid = cpu_to_le16(chan->dcid);
1679 req->flags = cpu_to_le16(0); 1702 req->flags = cpu_to_le16(0);
1680 1703
1681 return ptr - data; 1704 return ptr - data;
1682} 1705}
1683 1706
1684static int l2cap_parse_conf_req(struct sock *sk, void *data) 1707static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1685{ 1708{
1686 struct l2cap_pinfo *pi = l2cap_pi(sk);
1687 struct l2cap_conf_rsp *rsp = data; 1709 struct l2cap_conf_rsp *rsp = data;
1688 void *ptr = rsp->data; 1710 void *ptr = rsp->data;
1689 void *req = pi->conf_req; 1711 void *req = chan->conf_req;
1690 int len = pi->conf_len; 1712 int len = chan->conf_len;
1691 int type, hint, olen; 1713 int type, hint, olen;
1692 unsigned long val; 1714 unsigned long val;
1693 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 1715 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1694 u16 mtu = L2CAP_DEFAULT_MTU; 1716 u16 mtu = L2CAP_DEFAULT_MTU;
1695 u16 result = L2CAP_CONF_SUCCESS; 1717 u16 result = L2CAP_CONF_SUCCESS;
1696 1718
1697 BT_DBG("sk %p", sk); 1719 BT_DBG("chan %p", chan);
1698 1720
1699 while (len >= L2CAP_CONF_OPT_SIZE) { 1721 while (len >= L2CAP_CONF_OPT_SIZE) {
1700 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 1722 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -1708,7 +1730,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1708 break; 1730 break;
1709 1731
1710 case L2CAP_CONF_FLUSH_TO: 1732 case L2CAP_CONF_FLUSH_TO:
1711 pi->flush_to = val; 1733 chan->flush_to = val;
1712 break; 1734 break;
1713 1735
1714 case L2CAP_CONF_QOS: 1736 case L2CAP_CONF_QOS:
@@ -1721,7 +1743,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1721 1743
1722 case L2CAP_CONF_FCS: 1744 case L2CAP_CONF_FCS:
1723 if (val == L2CAP_FCS_NONE) 1745 if (val == L2CAP_FCS_NONE)
1724 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV; 1746 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1725 1747
1726 break; 1748 break;
1727 1749
@@ -1735,30 +1757,30 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1735 } 1757 }
1736 } 1758 }
1737 1759
1738 if (pi->num_conf_rsp || pi->num_conf_req > 1) 1760 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1739 goto done; 1761 goto done;
1740 1762
1741 switch (pi->mode) { 1763 switch (chan->mode) {
1742 case L2CAP_MODE_STREAMING: 1764 case L2CAP_MODE_STREAMING:
1743 case L2CAP_MODE_ERTM: 1765 case L2CAP_MODE_ERTM:
1744 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) { 1766 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1745 pi->mode = l2cap_select_mode(rfc.mode, 1767 chan->mode = l2cap_select_mode(rfc.mode,
1746 pi->conn->feat_mask); 1768 chan->conn->feat_mask);
1747 break; 1769 break;
1748 } 1770 }
1749 1771
1750 if (pi->mode != rfc.mode) 1772 if (chan->mode != rfc.mode)
1751 return -ECONNREFUSED; 1773 return -ECONNREFUSED;
1752 1774
1753 break; 1775 break;
1754 } 1776 }
1755 1777
1756done: 1778done:
1757 if (pi->mode != rfc.mode) { 1779 if (chan->mode != rfc.mode) {
1758 result = L2CAP_CONF_UNACCEPT; 1780 result = L2CAP_CONF_UNACCEPT;
1759 rfc.mode = pi->mode; 1781 rfc.mode = chan->mode;
1760 1782
1761 if (pi->num_conf_rsp == 1) 1783 if (chan->num_conf_rsp == 1)
1762 return -ECONNREFUSED; 1784 return -ECONNREFUSED;
1763 1785
1764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -1773,32 +1795,32 @@ done:
1773 if (mtu < L2CAP_DEFAULT_MIN_MTU) 1795 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1774 result = L2CAP_CONF_UNACCEPT; 1796 result = L2CAP_CONF_UNACCEPT;
1775 else { 1797 else {
1776 pi->omtu = mtu; 1798 chan->omtu = mtu;
1777 pi->conf_state |= L2CAP_CONF_MTU_DONE; 1799 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1778 } 1800 }
1779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 1801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1780 1802
1781 switch (rfc.mode) { 1803 switch (rfc.mode) {
1782 case L2CAP_MODE_BASIC: 1804 case L2CAP_MODE_BASIC:
1783 pi->fcs = L2CAP_FCS_NONE; 1805 chan->fcs = L2CAP_FCS_NONE;
1784 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1806 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1785 break; 1807 break;
1786 1808
1787 case L2CAP_MODE_ERTM: 1809 case L2CAP_MODE_ERTM:
1788 pi->remote_tx_win = rfc.txwin_size; 1810 chan->remote_tx_win = rfc.txwin_size;
1789 pi->remote_max_tx = rfc.max_transmit; 1811 chan->remote_max_tx = rfc.max_transmit;
1790 1812
1791 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10) 1813 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1792 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1814 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1793 1815
1794 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 1816 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1795 1817
1796 rfc.retrans_timeout = 1818 rfc.retrans_timeout =
1797 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 1819 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1798 rfc.monitor_timeout = 1820 rfc.monitor_timeout =
1799 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 1821 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1800 1822
1801 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1823 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1802 1824
1803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1825 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1804 sizeof(rfc), (unsigned long) &rfc); 1826 sizeof(rfc), (unsigned long) &rfc);
@@ -1806,12 +1828,12 @@ done:
1806 break; 1828 break;
1807 1829
1808 case L2CAP_MODE_STREAMING: 1830 case L2CAP_MODE_STREAMING:
1809 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10) 1831 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1810 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1832 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1811 1833
1812 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 1834 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1813 1835
1814 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1836 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1815 1837
1816 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1838 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1817 sizeof(rfc), (unsigned long) &rfc); 1839 sizeof(rfc), (unsigned long) &rfc);
@@ -1822,29 +1844,28 @@ done:
1822 result = L2CAP_CONF_UNACCEPT; 1844 result = L2CAP_CONF_UNACCEPT;
1823 1845
1824 memset(&rfc, 0, sizeof(rfc)); 1846 memset(&rfc, 0, sizeof(rfc));
1825 rfc.mode = pi->mode; 1847 rfc.mode = chan->mode;
1826 } 1848 }
1827 1849
1828 if (result == L2CAP_CONF_SUCCESS) 1850 if (result == L2CAP_CONF_SUCCESS)
1829 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 1851 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1830 } 1852 }
1831 rsp->scid = cpu_to_le16(pi->dcid); 1853 rsp->scid = cpu_to_le16(chan->dcid);
1832 rsp->result = cpu_to_le16(result); 1854 rsp->result = cpu_to_le16(result);
1833 rsp->flags = cpu_to_le16(0x0000); 1855 rsp->flags = cpu_to_le16(0x0000);
1834 1856
1835 return ptr - data; 1857 return ptr - data;
1836} 1858}
1837 1859
1838static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result) 1860static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1839{ 1861{
1840 struct l2cap_pinfo *pi = l2cap_pi(sk);
1841 struct l2cap_conf_req *req = data; 1862 struct l2cap_conf_req *req = data;
1842 void *ptr = req->data; 1863 void *ptr = req->data;
1843 int type, olen; 1864 int type, olen;
1844 unsigned long val; 1865 unsigned long val;
1845 struct l2cap_conf_rfc rfc; 1866 struct l2cap_conf_rfc rfc;
1846 1867
1847 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data); 1868 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1848 1869
1849 while (len >= L2CAP_CONF_OPT_SIZE) { 1870 while (len >= L2CAP_CONF_OPT_SIZE) {
1850 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); 1871 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -1853,27 +1874,27 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
1853 case L2CAP_CONF_MTU: 1874 case L2CAP_CONF_MTU:
1854 if (val < L2CAP_DEFAULT_MIN_MTU) { 1875 if (val < L2CAP_DEFAULT_MIN_MTU) {
1855 *result = L2CAP_CONF_UNACCEPT; 1876 *result = L2CAP_CONF_UNACCEPT;
1856 pi->imtu = L2CAP_DEFAULT_MIN_MTU; 1877 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1857 } else 1878 } else
1858 pi->imtu = val; 1879 chan->imtu = val;
1859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 1880 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1860 break; 1881 break;
1861 1882
1862 case L2CAP_CONF_FLUSH_TO: 1883 case L2CAP_CONF_FLUSH_TO:
1863 pi->flush_to = val; 1884 chan->flush_to = val;
1864 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 1885 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1865 2, pi->flush_to); 1886 2, chan->flush_to);
1866 break; 1887 break;
1867 1888
1868 case L2CAP_CONF_RFC: 1889 case L2CAP_CONF_RFC:
1869 if (olen == sizeof(rfc)) 1890 if (olen == sizeof(rfc))
1870 memcpy(&rfc, (void *)val, olen); 1891 memcpy(&rfc, (void *)val, olen);
1871 1892
1872 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) && 1893 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1873 rfc.mode != pi->mode) 1894 rfc.mode != chan->mode)
1874 return -ECONNREFUSED; 1895 return -ECONNREFUSED;
1875 1896
1876 pi->fcs = 0; 1897 chan->fcs = 0;
1877 1898
1878 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1899 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1879 sizeof(rfc), (unsigned long) &rfc); 1900 sizeof(rfc), (unsigned long) &rfc);
@@ -1881,53 +1902,74 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
1881 } 1902 }
1882 } 1903 }
1883 1904
1884 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode) 1905 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1885 return -ECONNREFUSED; 1906 return -ECONNREFUSED;
1886 1907
1887 pi->mode = rfc.mode; 1908 chan->mode = rfc.mode;
1888 1909
1889 if (*result == L2CAP_CONF_SUCCESS) { 1910 if (*result == L2CAP_CONF_SUCCESS) {
1890 switch (rfc.mode) { 1911 switch (rfc.mode) {
1891 case L2CAP_MODE_ERTM: 1912 case L2CAP_MODE_ERTM:
1892 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 1913 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1893 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 1914 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1894 pi->mps = le16_to_cpu(rfc.max_pdu_size); 1915 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1895 break; 1916 break;
1896 case L2CAP_MODE_STREAMING: 1917 case L2CAP_MODE_STREAMING:
1897 pi->mps = le16_to_cpu(rfc.max_pdu_size); 1918 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1898 } 1919 }
1899 } 1920 }
1900 1921
1901 req->dcid = cpu_to_le16(pi->dcid); 1922 req->dcid = cpu_to_le16(chan->dcid);
1902 req->flags = cpu_to_le16(0x0000); 1923 req->flags = cpu_to_le16(0x0000);
1903 1924
1904 return ptr - data; 1925 return ptr - data;
1905} 1926}
1906 1927
1907static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags) 1928static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1908{ 1929{
1909 struct l2cap_conf_rsp *rsp = data; 1930 struct l2cap_conf_rsp *rsp = data;
1910 void *ptr = rsp->data; 1931 void *ptr = rsp->data;
1911 1932
1912 BT_DBG("sk %p", sk); 1933 BT_DBG("chan %p", chan);
1913 1934
1914 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid); 1935 rsp->scid = cpu_to_le16(chan->dcid);
1915 rsp->result = cpu_to_le16(result); 1936 rsp->result = cpu_to_le16(result);
1916 rsp->flags = cpu_to_le16(flags); 1937 rsp->flags = cpu_to_le16(flags);
1917 1938
1918 return ptr - data; 1939 return ptr - data;
1919} 1940}
1920 1941
1921static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len) 1942void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
1943{
1944 struct l2cap_conn_rsp rsp;
1945 struct l2cap_conn *conn = chan->conn;
1946 u8 buf[128];
1947
1948 rsp.scid = cpu_to_le16(chan->dcid);
1949 rsp.dcid = cpu_to_le16(chan->scid);
1950 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1951 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1952 l2cap_send_cmd(conn, chan->ident,
1953 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1954
1955 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
1956 return;
1957
1958 chan->conf_state |= L2CAP_CONF_REQ_SENT;
1959 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1960 l2cap_build_conf_req(chan, buf), buf);
1961 chan->num_conf_req++;
1962}
1963
1964static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
1922{ 1965{
1923 struct l2cap_pinfo *pi = l2cap_pi(sk);
1924 int type, olen; 1966 int type, olen;
1925 unsigned long val; 1967 unsigned long val;
1926 struct l2cap_conf_rfc rfc; 1968 struct l2cap_conf_rfc rfc;
1927 1969
1928 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len); 1970 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
1929 1971
1930 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING)) 1972 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
1931 return; 1973 return;
1932 1974
1933 while (len >= L2CAP_CONF_OPT_SIZE) { 1975 while (len >= L2CAP_CONF_OPT_SIZE) {
@@ -1944,12 +1986,12 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1944done: 1986done:
1945 switch (rfc.mode) { 1987 switch (rfc.mode) {
1946 case L2CAP_MODE_ERTM: 1988 case L2CAP_MODE_ERTM:
1947 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 1989 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1948 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 1990 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1949 pi->mps = le16_to_cpu(rfc.max_pdu_size); 1991 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1950 break; 1992 break;
1951 case L2CAP_MODE_STREAMING: 1993 case L2CAP_MODE_STREAMING:
1952 pi->mps = le16_to_cpu(rfc.max_pdu_size); 1994 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1953 } 1995 }
1954} 1996}
1955 1997
@@ -1975,9 +2017,9 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
1975 2017
1976static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2018static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1977{ 2019{
1978 struct l2cap_chan_list *list = &conn->chan_list;
1979 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 2020 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1980 struct l2cap_conn_rsp rsp; 2021 struct l2cap_conn_rsp rsp;
2022 struct l2cap_chan *chan = NULL;
1981 struct sock *parent, *sk = NULL; 2023 struct sock *parent, *sk = NULL;
1982 int result, status = L2CAP_CS_NO_INFO; 2024 int result, status = L2CAP_CS_NO_INFO;
1983 2025
@@ -2015,11 +2057,19 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2015 if (!sk) 2057 if (!sk)
2016 goto response; 2058 goto response;
2017 2059
2018 write_lock_bh(&list->lock); 2060 chan = l2cap_chan_alloc(sk);
2061 if (!chan) {
2062 l2cap_sock_kill(sk);
2063 goto response;
2064 }
2065
2066 l2cap_pi(sk)->chan = chan;
2067
2068 write_lock_bh(&conn->chan_lock);
2019 2069
2020 /* Check if we already have channel with that dcid */ 2070 /* Check if we already have channel with that dcid */
2021 if (__l2cap_get_chan_by_dcid(list, scid)) { 2071 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2022 write_unlock_bh(&list->lock); 2072 write_unlock_bh(&conn->chan_lock);
2023 sock_set_flag(sk, SOCK_ZAPPED); 2073 sock_set_flag(sk, SOCK_ZAPPED);
2024 l2cap_sock_kill(sk); 2074 l2cap_sock_kill(sk);
2025 goto response; 2075 goto response;
@@ -2030,18 +2080,21 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2030 l2cap_sock_init(sk, parent); 2080 l2cap_sock_init(sk, parent);
2031 bacpy(&bt_sk(sk)->src, conn->src); 2081 bacpy(&bt_sk(sk)->src, conn->src);
2032 bacpy(&bt_sk(sk)->dst, conn->dst); 2082 bacpy(&bt_sk(sk)->dst, conn->dst);
2033 l2cap_pi(sk)->psm = psm; 2083 chan->psm = psm;
2034 l2cap_pi(sk)->dcid = scid; 2084 chan->dcid = scid;
2085
2086 bt_accept_enqueue(parent, sk);
2035 2087
2036 __l2cap_chan_add(conn, sk, parent); 2088 __l2cap_chan_add(conn, chan);
2037 dcid = l2cap_pi(sk)->scid; 2089
2090 dcid = chan->scid;
2038 2091
2039 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 2092 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2040 2093
2041 l2cap_pi(sk)->ident = cmd->ident; 2094 chan->ident = cmd->ident;
2042 2095
2043 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2096 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2044 if (l2cap_check_security(sk)) { 2097 if (l2cap_check_security(chan)) {
2045 if (bt_sk(sk)->defer_setup) { 2098 if (bt_sk(sk)->defer_setup) {
2046 sk->sk_state = BT_CONNECT2; 2099 sk->sk_state = BT_CONNECT2;
2047 result = L2CAP_CR_PEND; 2100 result = L2CAP_CR_PEND;
@@ -2063,7 +2116,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2063 status = L2CAP_CS_NO_INFO; 2116 status = L2CAP_CS_NO_INFO;
2064 } 2117 }
2065 2118
2066 write_unlock_bh(&list->lock); 2119 write_unlock_bh(&conn->chan_lock);
2067 2120
2068response: 2121response:
2069 bh_unlock_sock(parent); 2122 bh_unlock_sock(parent);
@@ -2089,13 +2142,13 @@ sendresp:
2089 L2CAP_INFO_REQ, sizeof(info), &info); 2142 L2CAP_INFO_REQ, sizeof(info), &info);
2090 } 2143 }
2091 2144
2092 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) && 2145 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2093 result == L2CAP_CR_SUCCESS) { 2146 result == L2CAP_CR_SUCCESS) {
2094 u8 buf[128]; 2147 u8 buf[128];
2095 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; 2148 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2096 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2149 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2097 l2cap_build_conf_req(sk, buf), buf); 2150 l2cap_build_conf_req(chan, buf), buf);
2098 l2cap_pi(sk)->num_conf_req++; 2151 chan->num_conf_req++;
2099 } 2152 }
2100 2153
2101 return 0; 2154 return 0;
@@ -2105,6 +2158,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2105{ 2158{
2106 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; 2159 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2107 u16 scid, dcid, result, status; 2160 u16 scid, dcid, result, status;
2161 struct l2cap_chan *chan;
2108 struct sock *sk; 2162 struct sock *sk;
2109 u8 req[128]; 2163 u8 req[128];
2110 2164
@@ -2116,34 +2170,36 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2116 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); 2170 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2117 2171
2118 if (scid) { 2172 if (scid) {
2119 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); 2173 chan = l2cap_get_chan_by_scid(conn, scid);
2120 if (!sk) 2174 if (!chan)
2121 return -EFAULT; 2175 return -EFAULT;
2122 } else { 2176 } else {
2123 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident); 2177 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2124 if (!sk) 2178 if (!chan)
2125 return -EFAULT; 2179 return -EFAULT;
2126 } 2180 }
2127 2181
2182 sk = chan->sk;
2183
2128 switch (result) { 2184 switch (result) {
2129 case L2CAP_CR_SUCCESS: 2185 case L2CAP_CR_SUCCESS:
2130 sk->sk_state = BT_CONFIG; 2186 sk->sk_state = BT_CONFIG;
2131 l2cap_pi(sk)->ident = 0; 2187 chan->ident = 0;
2132 l2cap_pi(sk)->dcid = dcid; 2188 chan->dcid = dcid;
2133 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 2189 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2134 2190
2135 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) 2191 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2136 break; 2192 break;
2137 2193
2138 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; 2194 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2139 2195
2140 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2196 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2141 l2cap_build_conf_req(sk, req), req); 2197 l2cap_build_conf_req(chan, req), req);
2142 l2cap_pi(sk)->num_conf_req++; 2198 chan->num_conf_req++;
2143 break; 2199 break;
2144 2200
2145 case L2CAP_CR_PEND: 2201 case L2CAP_CR_PEND:
2146 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 2202 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2147 break; 2203 break;
2148 2204
2149 default: 2205 default:
@@ -2155,7 +2211,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2155 break; 2211 break;
2156 } 2212 }
2157 2213
2158 l2cap_chan_del(sk, ECONNREFUSED); 2214 l2cap_chan_del(chan, ECONNREFUSED);
2159 break; 2215 break;
2160 } 2216 }
2161 2217
@@ -2163,15 +2219,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2163 return 0; 2219 return 0;
2164} 2220}
2165 2221
2166static inline void set_default_fcs(struct l2cap_pinfo *pi) 2222static inline void set_default_fcs(struct l2cap_chan *chan)
2167{ 2223{
2224 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2225
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both 2226 /* FCS is enabled only in ERTM or streaming mode, if one or both
2169 * sides request it. 2227 * sides request it.
2170 */ 2228 */
2171 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING) 2229 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2172 pi->fcs = L2CAP_FCS_NONE; 2230 chan->fcs = L2CAP_FCS_NONE;
2173 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV)) 2231 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2174 pi->fcs = L2CAP_FCS_CRC16; 2232 chan->fcs = L2CAP_FCS_CRC16;
2175} 2233}
2176 2234
2177static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) 2235static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
@@ -2179,6 +2237,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2179 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; 2237 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2180 u16 dcid, flags; 2238 u16 dcid, flags;
2181 u8 rsp[64]; 2239 u8 rsp[64];
2240 struct l2cap_chan *chan;
2182 struct sock *sk; 2241 struct sock *sk;
2183 int len; 2242 int len;
2184 2243
@@ -2187,10 +2246,12 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2187 2246
2188 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); 2247 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2189 2248
2190 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid); 2249 chan = l2cap_get_chan_by_scid(conn, dcid);
2191 if (!sk) 2250 if (!chan)
2192 return -ENOENT; 2251 return -ENOENT;
2193 2252
2253 sk = chan->sk;
2254
2194 if (sk->sk_state != BT_CONFIG) { 2255 if (sk->sk_state != BT_CONFIG) {
2195 struct l2cap_cmd_rej rej; 2256 struct l2cap_cmd_rej rej;
2196 2257
@@ -2202,62 +2263,62 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2202 2263
2203 /* Reject if config buffer is too small. */ 2264 /* Reject if config buffer is too small. */
2204 len = cmd_len - sizeof(*req); 2265 len = cmd_len - sizeof(*req);
2205 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) { 2266 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2206 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2267 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2207 l2cap_build_conf_rsp(sk, rsp, 2268 l2cap_build_conf_rsp(chan, rsp,
2208 L2CAP_CONF_REJECT, flags), rsp); 2269 L2CAP_CONF_REJECT, flags), rsp);
2209 goto unlock; 2270 goto unlock;
2210 } 2271 }
2211 2272
2212 /* Store config. */ 2273 /* Store config. */
2213 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len); 2274 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2214 l2cap_pi(sk)->conf_len += len; 2275 chan->conf_len += len;
2215 2276
2216 if (flags & 0x0001) { 2277 if (flags & 0x0001) {
2217 /* Incomplete config. Send empty response. */ 2278 /* Incomplete config. Send empty response. */
2218 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2279 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2219 l2cap_build_conf_rsp(sk, rsp, 2280 l2cap_build_conf_rsp(chan, rsp,
2220 L2CAP_CONF_SUCCESS, 0x0001), rsp); 2281 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2221 goto unlock; 2282 goto unlock;
2222 } 2283 }
2223 2284
2224 /* Complete config. */ 2285 /* Complete config. */
2225 len = l2cap_parse_conf_req(sk, rsp); 2286 len = l2cap_parse_conf_req(chan, rsp);
2226 if (len < 0) { 2287 if (len < 0) {
2227 l2cap_send_disconn_req(conn, sk, ECONNRESET); 2288 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2228 goto unlock; 2289 goto unlock;
2229 } 2290 }
2230 2291
2231 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); 2292 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2232 l2cap_pi(sk)->num_conf_rsp++; 2293 chan->num_conf_rsp++;
2233 2294
2234 /* Reset config buffer. */ 2295 /* Reset config buffer. */
2235 l2cap_pi(sk)->conf_len = 0; 2296 chan->conf_len = 0;
2236 2297
2237 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE)) 2298 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
2238 goto unlock; 2299 goto unlock;
2239 2300
2240 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) { 2301 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2241 set_default_fcs(l2cap_pi(sk)); 2302 set_default_fcs(chan);
2242 2303
2243 sk->sk_state = BT_CONNECTED; 2304 sk->sk_state = BT_CONNECTED;
2244 2305
2245 l2cap_pi(sk)->next_tx_seq = 0; 2306 chan->next_tx_seq = 0;
2246 l2cap_pi(sk)->expected_tx_seq = 0; 2307 chan->expected_tx_seq = 0;
2247 __skb_queue_head_init(TX_QUEUE(sk)); 2308 skb_queue_head_init(&chan->tx_q);
2248 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) 2309 if (chan->mode == L2CAP_MODE_ERTM)
2249 l2cap_ertm_init(sk); 2310 l2cap_ertm_init(chan);
2250 2311
2251 l2cap_chan_ready(sk); 2312 l2cap_chan_ready(sk);
2252 goto unlock; 2313 goto unlock;
2253 } 2314 }
2254 2315
2255 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) { 2316 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2256 u8 buf[64]; 2317 u8 buf[64];
2257 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; 2318 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2258 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2319 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2259 l2cap_build_conf_req(sk, buf), buf); 2320 l2cap_build_conf_req(chan, buf), buf);
2260 l2cap_pi(sk)->num_conf_req++; 2321 chan->num_conf_req++;
2261 } 2322 }
2262 2323
2263unlock: 2324unlock:
@@ -2269,6 +2330,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2269{ 2330{
2270 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; 2331 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2271 u16 scid, flags, result; 2332 u16 scid, flags, result;
2333 struct l2cap_chan *chan;
2272 struct sock *sk; 2334 struct sock *sk;
2273 int len = cmd->len - sizeof(*rsp); 2335 int len = cmd->len - sizeof(*rsp);
2274 2336
@@ -2279,36 +2341,38 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2279 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", 2341 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2280 scid, flags, result); 2342 scid, flags, result);
2281 2343
2282 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); 2344 chan = l2cap_get_chan_by_scid(conn, scid);
2283 if (!sk) 2345 if (!chan)
2284 return 0; 2346 return 0;
2285 2347
2348 sk = chan->sk;
2349
2286 switch (result) { 2350 switch (result) {
2287 case L2CAP_CONF_SUCCESS: 2351 case L2CAP_CONF_SUCCESS:
2288 l2cap_conf_rfc_get(sk, rsp->data, len); 2352 l2cap_conf_rfc_get(chan, rsp->data, len);
2289 break; 2353 break;
2290 2354
2291 case L2CAP_CONF_UNACCEPT: 2355 case L2CAP_CONF_UNACCEPT:
2292 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { 2356 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2293 char req[64]; 2357 char req[64];
2294 2358
2295 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { 2359 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2296 l2cap_send_disconn_req(conn, sk, ECONNRESET); 2360 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2297 goto done; 2361 goto done;
2298 } 2362 }
2299 2363
2300 /* throw out any old stored conf requests */ 2364 /* throw out any old stored conf requests */
2301 result = L2CAP_CONF_SUCCESS; 2365 result = L2CAP_CONF_SUCCESS;
2302 len = l2cap_parse_conf_rsp(sk, rsp->data, 2366 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2303 len, req, &result); 2367 req, &result);
2304 if (len < 0) { 2368 if (len < 0) {
2305 l2cap_send_disconn_req(conn, sk, ECONNRESET); 2369 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2306 goto done; 2370 goto done;
2307 } 2371 }
2308 2372
2309 l2cap_send_cmd(conn, l2cap_get_ident(conn), 2373 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2310 L2CAP_CONF_REQ, len, req); 2374 L2CAP_CONF_REQ, len, req);
2311 l2cap_pi(sk)->num_conf_req++; 2375 chan->num_conf_req++;
2312 if (result != L2CAP_CONF_SUCCESS) 2376 if (result != L2CAP_CONF_SUCCESS)
2313 goto done; 2377 goto done;
2314 break; 2378 break;
@@ -2317,24 +2381,24 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2317 default: 2381 default:
2318 sk->sk_err = ECONNRESET; 2382 sk->sk_err = ECONNRESET;
2319 l2cap_sock_set_timer(sk, HZ * 5); 2383 l2cap_sock_set_timer(sk, HZ * 5);
2320 l2cap_send_disconn_req(conn, sk, ECONNRESET); 2384 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2321 goto done; 2385 goto done;
2322 } 2386 }
2323 2387
2324 if (flags & 0x01) 2388 if (flags & 0x01)
2325 goto done; 2389 goto done;
2326 2390
2327 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE; 2391 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2328 2392
2329 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2393 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2330 set_default_fcs(l2cap_pi(sk)); 2394 set_default_fcs(chan);
2331 2395
2332 sk->sk_state = BT_CONNECTED; 2396 sk->sk_state = BT_CONNECTED;
2333 l2cap_pi(sk)->next_tx_seq = 0; 2397 chan->next_tx_seq = 0;
2334 l2cap_pi(sk)->expected_tx_seq = 0; 2398 chan->expected_tx_seq = 0;
2335 __skb_queue_head_init(TX_QUEUE(sk)); 2399 skb_queue_head_init(&chan->tx_q);
2336 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) 2400 if (chan->mode == L2CAP_MODE_ERTM)
2337 l2cap_ertm_init(sk); 2401 l2cap_ertm_init(chan);
2338 2402
2339 l2cap_chan_ready(sk); 2403 l2cap_chan_ready(sk);
2340 } 2404 }
@@ -2349,6 +2413,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2349 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; 2413 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2350 struct l2cap_disconn_rsp rsp; 2414 struct l2cap_disconn_rsp rsp;
2351 u16 dcid, scid; 2415 u16 dcid, scid;
2416 struct l2cap_chan *chan;
2352 struct sock *sk; 2417 struct sock *sk;
2353 2418
2354 scid = __le16_to_cpu(req->scid); 2419 scid = __le16_to_cpu(req->scid);
@@ -2356,12 +2421,14 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2356 2421
2357 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); 2422 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2358 2423
2359 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid); 2424 chan = l2cap_get_chan_by_scid(conn, dcid);
2360 if (!sk) 2425 if (!chan)
2361 return 0; 2426 return 0;
2362 2427
2363 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 2428 sk = chan->sk;
2364 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 2429
2430 rsp.dcid = cpu_to_le16(chan->scid);
2431 rsp.scid = cpu_to_le16(chan->dcid);
2365 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); 2432 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2366 2433
2367 sk->sk_shutdown = SHUTDOWN_MASK; 2434 sk->sk_shutdown = SHUTDOWN_MASK;
@@ -2375,7 +2442,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2375 return 0; 2442 return 0;
2376 } 2443 }
2377 2444
2378 l2cap_chan_del(sk, ECONNRESET); 2445 l2cap_chan_del(chan, ECONNRESET);
2379 bh_unlock_sock(sk); 2446 bh_unlock_sock(sk);
2380 2447
2381 l2cap_sock_kill(sk); 2448 l2cap_sock_kill(sk);
@@ -2386,6 +2453,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2386{ 2453{
2387 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; 2454 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2388 u16 dcid, scid; 2455 u16 dcid, scid;
2456 struct l2cap_chan *chan;
2389 struct sock *sk; 2457 struct sock *sk;
2390 2458
2391 scid = __le16_to_cpu(rsp->scid); 2459 scid = __le16_to_cpu(rsp->scid);
@@ -2393,10 +2461,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2393 2461
2394 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); 2462 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2395 2463
2396 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); 2464 chan = l2cap_get_chan_by_scid(conn, scid);
2397 if (!sk) 2465 if (!chan)
2398 return 0; 2466 return 0;
2399 2467
2468 sk = chan->sk;
2469
2400 /* don't delete l2cap channel if sk is owned by user */ 2470 /* don't delete l2cap channel if sk is owned by user */
2401 if (sock_owned_by_user(sk)) { 2471 if (sock_owned_by_user(sk)) {
2402 sk->sk_state = BT_DISCONN; 2472 sk->sk_state = BT_DISCONN;
@@ -2406,7 +2476,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2406 return 0; 2476 return 0;
2407 } 2477 }
2408 2478
2409 l2cap_chan_del(sk, 0); 2479 l2cap_chan_del(chan, 0);
2410 bh_unlock_sock(sk); 2480 bh_unlock_sock(sk);
2411 2481
2412 l2cap_sock_kill(sk); 2482 l2cap_sock_kill(sk);
@@ -2463,6 +2533,11 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
2463 2533
2464 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); 2534 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2465 2535
2536 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2537 if (cmd->ident != conn->info_ident ||
2538 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2539 return 0;
2540
2466 del_timer(&conn->info_timer); 2541 del_timer(&conn->info_timer);
2467 2542
2468 if (result != L2CAP_IR_SUCCESS) { 2543 if (result != L2CAP_IR_SUCCESS) {
@@ -2673,7 +2748,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2673 2748
2674 if (err) { 2749 if (err) {
2675 struct l2cap_cmd_rej rej; 2750 struct l2cap_cmd_rej rej;
2676 BT_DBG("error %d", err); 2751
2752 BT_ERR("Wrong link type (%d)", err);
2677 2753
2678 /* FIXME: Map err to a valid reason */ 2754 /* FIXME: Map err to a valid reason */
2679 rej.reason = cpu_to_le16(0); 2755 rej.reason = cpu_to_le16(0);
@@ -2687,12 +2763,12 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2687 kfree_skb(skb); 2763 kfree_skb(skb);
2688} 2764}
2689 2765
2690static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb) 2766static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2691{ 2767{
2692 u16 our_fcs, rcv_fcs; 2768 u16 our_fcs, rcv_fcs;
2693 int hdr_size = L2CAP_HDR_SIZE + 2; 2769 int hdr_size = L2CAP_HDR_SIZE + 2;
2694 2770
2695 if (pi->fcs == L2CAP_FCS_CRC16) { 2771 if (chan->fcs == L2CAP_FCS_CRC16) {
2696 skb_trim(skb, skb->len - 2); 2772 skb_trim(skb, skb->len - 2);
2697 rcv_fcs = get_unaligned_le16(skb->data + skb->len); 2773 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2698 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 2774 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
@@ -2703,49 +2779,47 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2703 return 0; 2779 return 0;
2704} 2780}
2705 2781
2706static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk) 2782static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2707{ 2783{
2708 struct l2cap_pinfo *pi = l2cap_pi(sk);
2709 u16 control = 0; 2784 u16 control = 0;
2710 2785
2711 pi->frames_sent = 0; 2786 chan->frames_sent = 0;
2712 2787
2713 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 2788 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2714 2789
2715 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 2790 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2716 control |= L2CAP_SUPER_RCV_NOT_READY; 2791 control |= L2CAP_SUPER_RCV_NOT_READY;
2717 l2cap_send_sframe(pi, control); 2792 l2cap_send_sframe(chan, control);
2718 pi->conn_state |= L2CAP_CONN_RNR_SENT; 2793 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2719 } 2794 }
2720 2795
2721 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY) 2796 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2722 l2cap_retransmit_frames(sk); 2797 l2cap_retransmit_frames(chan);
2723 2798
2724 l2cap_ertm_send(sk); 2799 l2cap_ertm_send(chan);
2725 2800
2726 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && 2801 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2727 pi->frames_sent == 0) { 2802 chan->frames_sent == 0) {
2728 control |= L2CAP_SUPER_RCV_READY; 2803 control |= L2CAP_SUPER_RCV_READY;
2729 l2cap_send_sframe(pi, control); 2804 l2cap_send_sframe(chan, control);
2730 } 2805 }
2731} 2806}
2732 2807
2733static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) 2808static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2734{ 2809{
2735 struct sk_buff *next_skb; 2810 struct sk_buff *next_skb;
2736 struct l2cap_pinfo *pi = l2cap_pi(sk);
2737 int tx_seq_offset, next_tx_seq_offset; 2811 int tx_seq_offset, next_tx_seq_offset;
2738 2812
2739 bt_cb(skb)->tx_seq = tx_seq; 2813 bt_cb(skb)->tx_seq = tx_seq;
2740 bt_cb(skb)->sar = sar; 2814 bt_cb(skb)->sar = sar;
2741 2815
2742 next_skb = skb_peek(SREJ_QUEUE(sk)); 2816 next_skb = skb_peek(&chan->srej_q);
2743 if (!next_skb) { 2817 if (!next_skb) {
2744 __skb_queue_tail(SREJ_QUEUE(sk), skb); 2818 __skb_queue_tail(&chan->srej_q, skb);
2745 return 0; 2819 return 0;
2746 } 2820 }
2747 2821
2748 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64; 2822 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2749 if (tx_seq_offset < 0) 2823 if (tx_seq_offset < 0)
2750 tx_seq_offset += 64; 2824 tx_seq_offset += 64;
2751 2825
@@ -2754,53 +2828,52 @@ static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_s
2754 return -EINVAL; 2828 return -EINVAL;
2755 2829
2756 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - 2830 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2757 pi->buffer_seq) % 64; 2831 chan->buffer_seq) % 64;
2758 if (next_tx_seq_offset < 0) 2832 if (next_tx_seq_offset < 0)
2759 next_tx_seq_offset += 64; 2833 next_tx_seq_offset += 64;
2760 2834
2761 if (next_tx_seq_offset > tx_seq_offset) { 2835 if (next_tx_seq_offset > tx_seq_offset) {
2762 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); 2836 __skb_queue_before(&chan->srej_q, next_skb, skb);
2763 return 0; 2837 return 0;
2764 } 2838 }
2765 2839
2766 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb)) 2840 if (skb_queue_is_last(&chan->srej_q, next_skb))
2767 break; 2841 break;
2768 2842
2769 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb))); 2843 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
2770 2844
2771 __skb_queue_tail(SREJ_QUEUE(sk), skb); 2845 __skb_queue_tail(&chan->srej_q, skb);
2772 2846
2773 return 0; 2847 return 0;
2774} 2848}
2775 2849
2776static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) 2850static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2777{ 2851{
2778 struct l2cap_pinfo *pi = l2cap_pi(sk);
2779 struct sk_buff *_skb; 2852 struct sk_buff *_skb;
2780 int err; 2853 int err;
2781 2854
2782 switch (control & L2CAP_CTRL_SAR) { 2855 switch (control & L2CAP_CTRL_SAR) {
2783 case L2CAP_SDU_UNSEGMENTED: 2856 case L2CAP_SDU_UNSEGMENTED:
2784 if (pi->conn_state & L2CAP_CONN_SAR_SDU) 2857 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2785 goto drop; 2858 goto drop;
2786 2859
2787 err = sock_queue_rcv_skb(sk, skb); 2860 err = sock_queue_rcv_skb(chan->sk, skb);
2788 if (!err) 2861 if (!err)
2789 return err; 2862 return err;
2790 2863
2791 break; 2864 break;
2792 2865
2793 case L2CAP_SDU_START: 2866 case L2CAP_SDU_START:
2794 if (pi->conn_state & L2CAP_CONN_SAR_SDU) 2867 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2795 goto drop; 2868 goto drop;
2796 2869
2797 pi->sdu_len = get_unaligned_le16(skb->data); 2870 chan->sdu_len = get_unaligned_le16(skb->data);
2798 2871
2799 if (pi->sdu_len > pi->imtu) 2872 if (chan->sdu_len > chan->imtu)
2800 goto disconnect; 2873 goto disconnect;
2801 2874
2802 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); 2875 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2803 if (!pi->sdu) 2876 if (!chan->sdu)
2804 return -ENOMEM; 2877 return -ENOMEM;
2805 2878
2806 /* pull sdu_len bytes only after alloc, because of Local Busy 2879 /* pull sdu_len bytes only after alloc, because of Local Busy
@@ -2808,63 +2881,63 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
2808 * only once, i.e., when alloc does not fail */ 2881 * only once, i.e., when alloc does not fail */
2809 skb_pull(skb, 2); 2882 skb_pull(skb, 2);
2810 2883
2811 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2884 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2812 2885
2813 pi->conn_state |= L2CAP_CONN_SAR_SDU; 2886 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2814 pi->partial_sdu_len = skb->len; 2887 chan->partial_sdu_len = skb->len;
2815 break; 2888 break;
2816 2889
2817 case L2CAP_SDU_CONTINUE: 2890 case L2CAP_SDU_CONTINUE:
2818 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 2891 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2819 goto disconnect; 2892 goto disconnect;
2820 2893
2821 if (!pi->sdu) 2894 if (!chan->sdu)
2822 goto disconnect; 2895 goto disconnect;
2823 2896
2824 pi->partial_sdu_len += skb->len; 2897 chan->partial_sdu_len += skb->len;
2825 if (pi->partial_sdu_len > pi->sdu_len) 2898 if (chan->partial_sdu_len > chan->sdu_len)
2826 goto drop; 2899 goto drop;
2827 2900
2828 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2901 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2829 2902
2830 break; 2903 break;
2831 2904
2832 case L2CAP_SDU_END: 2905 case L2CAP_SDU_END:
2833 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 2906 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
2834 goto disconnect; 2907 goto disconnect;
2835 2908
2836 if (!pi->sdu) 2909 if (!chan->sdu)
2837 goto disconnect; 2910 goto disconnect;
2838 2911
2839 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) { 2912 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2840 pi->partial_sdu_len += skb->len; 2913 chan->partial_sdu_len += skb->len;
2841 2914
2842 if (pi->partial_sdu_len > pi->imtu) 2915 if (chan->partial_sdu_len > chan->imtu)
2843 goto drop; 2916 goto drop;
2844 2917
2845 if (pi->partial_sdu_len != pi->sdu_len) 2918 if (chan->partial_sdu_len != chan->sdu_len)
2846 goto drop; 2919 goto drop;
2847 2920
2848 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 2921 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2849 } 2922 }
2850 2923
2851 _skb = skb_clone(pi->sdu, GFP_ATOMIC); 2924 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2852 if (!_skb) { 2925 if (!_skb) {
2853 pi->conn_state |= L2CAP_CONN_SAR_RETRY; 2926 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2854 return -ENOMEM; 2927 return -ENOMEM;
2855 } 2928 }
2856 2929
2857 err = sock_queue_rcv_skb(sk, _skb); 2930 err = sock_queue_rcv_skb(chan->sk, _skb);
2858 if (err < 0) { 2931 if (err < 0) {
2859 kfree_skb(_skb); 2932 kfree_skb(_skb);
2860 pi->conn_state |= L2CAP_CONN_SAR_RETRY; 2933 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2861 return err; 2934 return err;
2862 } 2935 }
2863 2936
2864 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY; 2937 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2865 pi->conn_state &= ~L2CAP_CONN_SAR_SDU; 2938 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2866 2939
2867 kfree_skb(pi->sdu); 2940 kfree_skb(chan->sdu);
2868 break; 2941 break;
2869 } 2942 }
2870 2943
@@ -2872,51 +2945,50 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
2872 return 0; 2945 return 0;
2873 2946
2874drop: 2947drop:
2875 kfree_skb(pi->sdu); 2948 kfree_skb(chan->sdu);
2876 pi->sdu = NULL; 2949 chan->sdu = NULL;
2877 2950
2878disconnect: 2951disconnect:
2879 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 2952 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
2880 kfree_skb(skb); 2953 kfree_skb(skb);
2881 return 0; 2954 return 0;
2882} 2955}
2883 2956
2884static int l2cap_try_push_rx_skb(struct sock *sk) 2957static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2885{ 2958{
2886 struct l2cap_pinfo *pi = l2cap_pi(sk);
2887 struct sk_buff *skb; 2959 struct sk_buff *skb;
2888 u16 control; 2960 u16 control;
2889 int err; 2961 int err;
2890 2962
2891 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) { 2963 while ((skb = skb_dequeue(&chan->busy_q))) {
2892 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 2964 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2893 err = l2cap_ertm_reassembly_sdu(sk, skb, control); 2965 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
2894 if (err < 0) { 2966 if (err < 0) {
2895 skb_queue_head(BUSY_QUEUE(sk), skb); 2967 skb_queue_head(&chan->busy_q, skb);
2896 return -EBUSY; 2968 return -EBUSY;
2897 } 2969 }
2898 2970
2899 pi->buffer_seq = (pi->buffer_seq + 1) % 64; 2971 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2900 } 2972 }
2901 2973
2902 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT)) 2974 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
2903 goto done; 2975 goto done;
2904 2976
2905 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 2977 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2906 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; 2978 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2907 l2cap_send_sframe(pi, control); 2979 l2cap_send_sframe(chan, control);
2908 l2cap_pi(sk)->retry_count = 1; 2980 chan->retry_count = 1;
2909 2981
2910 del_timer(&pi->retrans_timer); 2982 del_timer(&chan->retrans_timer);
2911 __mod_monitor_timer(); 2983 __mod_monitor_timer();
2912 2984
2913 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; 2985 chan->conn_state |= L2CAP_CONN_WAIT_F;
2914 2986
2915done: 2987done:
2916 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY; 2988 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2917 pi->conn_state &= ~L2CAP_CONN_RNR_SENT; 2989 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
2918 2990
2919 BT_DBG("sk %p, Exit local busy", sk); 2991 BT_DBG("chan %p, Exit local busy", chan);
2920 2992
2921 return 0; 2993 return 0;
2922} 2994}
@@ -2924,21 +2996,21 @@ done:
2924static void l2cap_busy_work(struct work_struct *work) 2996static void l2cap_busy_work(struct work_struct *work)
2925{ 2997{
2926 DECLARE_WAITQUEUE(wait, current); 2998 DECLARE_WAITQUEUE(wait, current);
2927 struct l2cap_pinfo *pi = 2999 struct l2cap_chan *chan =
2928 container_of(work, struct l2cap_pinfo, busy_work); 3000 container_of(work, struct l2cap_chan, busy_work);
2929 struct sock *sk = (struct sock *)pi; 3001 struct sock *sk = chan->sk;
2930 int n_tries = 0, timeo = HZ/5, err; 3002 int n_tries = 0, timeo = HZ/5, err;
2931 struct sk_buff *skb; 3003 struct sk_buff *skb;
2932 3004
2933 lock_sock(sk); 3005 lock_sock(sk);
2934 3006
2935 add_wait_queue(sk_sleep(sk), &wait); 3007 add_wait_queue(sk_sleep(sk), &wait);
2936 while ((skb = skb_peek(BUSY_QUEUE(sk)))) { 3008 while ((skb = skb_peek(&chan->busy_q))) {
2937 set_current_state(TASK_INTERRUPTIBLE); 3009 set_current_state(TASK_INTERRUPTIBLE);
2938 3010
2939 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) { 3011 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2940 err = -EBUSY; 3012 err = -EBUSY;
2941 l2cap_send_disconn_req(pi->conn, sk, EBUSY); 3013 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
2942 break; 3014 break;
2943 } 3015 }
2944 3016
@@ -2958,7 +3030,7 @@ static void l2cap_busy_work(struct work_struct *work)
2958 if (err) 3030 if (err)
2959 break; 3031 break;
2960 3032
2961 if (l2cap_try_push_rx_skb(sk) == 0) 3033 if (l2cap_try_push_rx_skb(chan) == 0)
2962 break; 3034 break;
2963 } 3035 }
2964 3036
@@ -2968,48 +3040,46 @@ static void l2cap_busy_work(struct work_struct *work)
2968 release_sock(sk); 3040 release_sock(sk);
2969} 3041}
2970 3042
2971static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control) 3043static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2972{ 3044{
2973 struct l2cap_pinfo *pi = l2cap_pi(sk);
2974 int sctrl, err; 3045 int sctrl, err;
2975 3046
2976 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3047 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2977 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3048 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2978 __skb_queue_tail(BUSY_QUEUE(sk), skb); 3049 __skb_queue_tail(&chan->busy_q, skb);
2979 return l2cap_try_push_rx_skb(sk); 3050 return l2cap_try_push_rx_skb(chan);
2980 3051
2981 3052
2982 } 3053 }
2983 3054
2984 err = l2cap_ertm_reassembly_sdu(sk, skb, control); 3055 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
2985 if (err >= 0) { 3056 if (err >= 0) {
2986 pi->buffer_seq = (pi->buffer_seq + 1) % 64; 3057 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2987 return err; 3058 return err;
2988 } 3059 }
2989 3060
2990 /* Busy Condition */ 3061 /* Busy Condition */
2991 BT_DBG("sk %p, Enter local busy", sk); 3062 BT_DBG("chan %p, Enter local busy", chan);
2992 3063
2993 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY; 3064 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2994 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3065 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2995 __skb_queue_tail(BUSY_QUEUE(sk), skb); 3066 __skb_queue_tail(&chan->busy_q, skb);
2996 3067
2997 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3068 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2998 sctrl |= L2CAP_SUPER_RCV_NOT_READY; 3069 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
2999 l2cap_send_sframe(pi, sctrl); 3070 l2cap_send_sframe(chan, sctrl);
3000 3071
3001 pi->conn_state |= L2CAP_CONN_RNR_SENT; 3072 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3002 3073
3003 del_timer(&pi->ack_timer); 3074 del_timer(&chan->ack_timer);
3004 3075
3005 queue_work(_busy_wq, &pi->busy_work); 3076 queue_work(_busy_wq, &chan->busy_work);
3006 3077
3007 return err; 3078 return err;
3008} 3079}
3009 3080
3010static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control) 3081static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3011{ 3082{
3012 struct l2cap_pinfo *pi = l2cap_pi(sk);
3013 struct sk_buff *_skb; 3083 struct sk_buff *_skb;
3014 int err = -EINVAL; 3084 int err = -EINVAL;
3015 3085
@@ -3020,80 +3090,80 @@ static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb,
3020 3090
3021 switch (control & L2CAP_CTRL_SAR) { 3091 switch (control & L2CAP_CTRL_SAR) {
3022 case L2CAP_SDU_UNSEGMENTED: 3092 case L2CAP_SDU_UNSEGMENTED:
3023 if (pi->conn_state & L2CAP_CONN_SAR_SDU) { 3093 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3024 kfree_skb(pi->sdu); 3094 kfree_skb(chan->sdu);
3025 break; 3095 break;
3026 } 3096 }
3027 3097
3028 err = sock_queue_rcv_skb(sk, skb); 3098 err = sock_queue_rcv_skb(chan->sk, skb);
3029 if (!err) 3099 if (!err)
3030 return 0; 3100 return 0;
3031 3101
3032 break; 3102 break;
3033 3103
3034 case L2CAP_SDU_START: 3104 case L2CAP_SDU_START:
3035 if (pi->conn_state & L2CAP_CONN_SAR_SDU) { 3105 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3036 kfree_skb(pi->sdu); 3106 kfree_skb(chan->sdu);
3037 break; 3107 break;
3038 } 3108 }
3039 3109
3040 pi->sdu_len = get_unaligned_le16(skb->data); 3110 chan->sdu_len = get_unaligned_le16(skb->data);
3041 skb_pull(skb, 2); 3111 skb_pull(skb, 2);
3042 3112
3043 if (pi->sdu_len > pi->imtu) { 3113 if (chan->sdu_len > chan->imtu) {
3044 err = -EMSGSIZE; 3114 err = -EMSGSIZE;
3045 break; 3115 break;
3046 } 3116 }
3047 3117
3048 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC); 3118 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3049 if (!pi->sdu) { 3119 if (!chan->sdu) {
3050 err = -ENOMEM; 3120 err = -ENOMEM;
3051 break; 3121 break;
3052 } 3122 }
3053 3123
3054 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 3124 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3055 3125
3056 pi->conn_state |= L2CAP_CONN_SAR_SDU; 3126 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3057 pi->partial_sdu_len = skb->len; 3127 chan->partial_sdu_len = skb->len;
3058 err = 0; 3128 err = 0;
3059 break; 3129 break;
3060 3130
3061 case L2CAP_SDU_CONTINUE: 3131 case L2CAP_SDU_CONTINUE:
3062 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 3132 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3063 break; 3133 break;
3064 3134
3065 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 3135 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3066 3136
3067 pi->partial_sdu_len += skb->len; 3137 chan->partial_sdu_len += skb->len;
3068 if (pi->partial_sdu_len > pi->sdu_len) 3138 if (chan->partial_sdu_len > chan->sdu_len)
3069 kfree_skb(pi->sdu); 3139 kfree_skb(chan->sdu);
3070 else 3140 else
3071 err = 0; 3141 err = 0;
3072 3142
3073 break; 3143 break;
3074 3144
3075 case L2CAP_SDU_END: 3145 case L2CAP_SDU_END:
3076 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU)) 3146 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3077 break; 3147 break;
3078 3148
3079 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len); 3149 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3080 3150
3081 pi->conn_state &= ~L2CAP_CONN_SAR_SDU; 3151 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3082 pi->partial_sdu_len += skb->len; 3152 chan->partial_sdu_len += skb->len;
3083 3153
3084 if (pi->partial_sdu_len > pi->imtu) 3154 if (chan->partial_sdu_len > chan->imtu)
3085 goto drop; 3155 goto drop;
3086 3156
3087 if (pi->partial_sdu_len == pi->sdu_len) { 3157 if (chan->partial_sdu_len == chan->sdu_len) {
3088 _skb = skb_clone(pi->sdu, GFP_ATOMIC); 3158 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3089 err = sock_queue_rcv_skb(sk, _skb); 3159 err = sock_queue_rcv_skb(chan->sk, _skb);
3090 if (err < 0) 3160 if (err < 0)
3091 kfree_skb(_skb); 3161 kfree_skb(_skb);
3092 } 3162 }
3093 err = 0; 3163 err = 0;
3094 3164
3095drop: 3165drop:
3096 kfree_skb(pi->sdu); 3166 kfree_skb(chan->sdu);
3097 break; 3167 break;
3098 } 3168 }
3099 3169
@@ -3101,31 +3171,30 @@ drop:
3101 return err; 3171 return err;
3102} 3172}
3103 3173
3104static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq) 3174static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3105{ 3175{
3106 struct sk_buff *skb; 3176 struct sk_buff *skb;
3107 u16 control; 3177 u16 control;
3108 3178
3109 while ((skb = skb_peek(SREJ_QUEUE(sk)))) { 3179 while ((skb = skb_peek(&chan->srej_q))) {
3110 if (bt_cb(skb)->tx_seq != tx_seq) 3180 if (bt_cb(skb)->tx_seq != tx_seq)
3111 break; 3181 break;
3112 3182
3113 skb = skb_dequeue(SREJ_QUEUE(sk)); 3183 skb = skb_dequeue(&chan->srej_q);
3114 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3184 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3115 l2cap_ertm_reassembly_sdu(sk, skb, control); 3185 l2cap_ertm_reassembly_sdu(chan, skb, control);
3116 l2cap_pi(sk)->buffer_seq_srej = 3186 chan->buffer_seq_srej =
3117 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; 3187 (chan->buffer_seq_srej + 1) % 64;
3118 tx_seq = (tx_seq + 1) % 64; 3188 tx_seq = (tx_seq + 1) % 64;
3119 } 3189 }
3120} 3190}
3121 3191
3122static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq) 3192static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3123{ 3193{
3124 struct l2cap_pinfo *pi = l2cap_pi(sk);
3125 struct srej_list *l, *tmp; 3194 struct srej_list *l, *tmp;
3126 u16 control; 3195 u16 control;
3127 3196
3128 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { 3197 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3129 if (l->tx_seq == tx_seq) { 3198 if (l->tx_seq == tx_seq) {
3130 list_del(&l->list); 3199 list_del(&l->list);
3131 kfree(l); 3200 kfree(l);
@@ -3133,107 +3202,105 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3133 } 3202 }
3134 control = L2CAP_SUPER_SELECT_REJECT; 3203 control = L2CAP_SUPER_SELECT_REJECT;
3135 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3204 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3136 l2cap_send_sframe(pi, control); 3205 l2cap_send_sframe(chan, control);
3137 list_del(&l->list); 3206 list_del(&l->list);
3138 list_add_tail(&l->list, SREJ_LIST(sk)); 3207 list_add_tail(&l->list, &chan->srej_l);
3139 } 3208 }
3140} 3209}
3141 3210
3142static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq) 3211static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3143{ 3212{
3144 struct l2cap_pinfo *pi = l2cap_pi(sk);
3145 struct srej_list *new; 3213 struct srej_list *new;
3146 u16 control; 3214 u16 control;
3147 3215
3148 while (tx_seq != pi->expected_tx_seq) { 3216 while (tx_seq != chan->expected_tx_seq) {
3149 control = L2CAP_SUPER_SELECT_REJECT; 3217 control = L2CAP_SUPER_SELECT_REJECT;
3150 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3218 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3151 l2cap_send_sframe(pi, control); 3219 l2cap_send_sframe(chan, control);
3152 3220
3153 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 3221 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3154 new->tx_seq = pi->expected_tx_seq; 3222 new->tx_seq = chan->expected_tx_seq;
3155 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 3223 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3156 list_add_tail(&new->list, SREJ_LIST(sk)); 3224 list_add_tail(&new->list, &chan->srej_l);
3157 } 3225 }
3158 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 3226 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3159} 3227}
3160 3228
3161static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 3229static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3162{ 3230{
3163 struct l2cap_pinfo *pi = l2cap_pi(sk);
3164 u8 tx_seq = __get_txseq(rx_control); 3231 u8 tx_seq = __get_txseq(rx_control);
3165 u8 req_seq = __get_reqseq(rx_control); 3232 u8 req_seq = __get_reqseq(rx_control);
3166 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; 3233 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3167 int tx_seq_offset, expected_tx_seq_offset; 3234 int tx_seq_offset, expected_tx_seq_offset;
3168 int num_to_ack = (pi->tx_win/6) + 1; 3235 int num_to_ack = (chan->tx_win/6) + 1;
3169 int err = 0; 3236 int err = 0;
3170 3237
3171 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq, 3238 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3172 rx_control); 3239 tx_seq, rx_control);
3173 3240
3174 if (L2CAP_CTRL_FINAL & rx_control && 3241 if (L2CAP_CTRL_FINAL & rx_control &&
3175 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) { 3242 chan->conn_state & L2CAP_CONN_WAIT_F) {
3176 del_timer(&pi->monitor_timer); 3243 del_timer(&chan->monitor_timer);
3177 if (pi->unacked_frames > 0) 3244 if (chan->unacked_frames > 0)
3178 __mod_retrans_timer(); 3245 __mod_retrans_timer();
3179 pi->conn_state &= ~L2CAP_CONN_WAIT_F; 3246 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3180 } 3247 }
3181 3248
3182 pi->expected_ack_seq = req_seq; 3249 chan->expected_ack_seq = req_seq;
3183 l2cap_drop_acked_frames(sk); 3250 l2cap_drop_acked_frames(chan);
3184 3251
3185 if (tx_seq == pi->expected_tx_seq) 3252 if (tx_seq == chan->expected_tx_seq)
3186 goto expected; 3253 goto expected;
3187 3254
3188 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64; 3255 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3189 if (tx_seq_offset < 0) 3256 if (tx_seq_offset < 0)
3190 tx_seq_offset += 64; 3257 tx_seq_offset += 64;
3191 3258
3192 /* invalid tx_seq */ 3259 /* invalid tx_seq */
3193 if (tx_seq_offset >= pi->tx_win) { 3260 if (tx_seq_offset >= chan->tx_win) {
3194 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3261 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3195 goto drop; 3262 goto drop;
3196 } 3263 }
3197 3264
3198 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY) 3265 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3199 goto drop; 3266 goto drop;
3200 3267
3201 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 3268 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3202 struct srej_list *first; 3269 struct srej_list *first;
3203 3270
3204 first = list_first_entry(SREJ_LIST(sk), 3271 first = list_first_entry(&chan->srej_l,
3205 struct srej_list, list); 3272 struct srej_list, list);
3206 if (tx_seq == first->tx_seq) { 3273 if (tx_seq == first->tx_seq) {
3207 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 3274 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3208 l2cap_check_srej_gap(sk, tx_seq); 3275 l2cap_check_srej_gap(chan, tx_seq);
3209 3276
3210 list_del(&first->list); 3277 list_del(&first->list);
3211 kfree(first); 3278 kfree(first);
3212 3279
3213 if (list_empty(SREJ_LIST(sk))) { 3280 if (list_empty(&chan->srej_l)) {
3214 pi->buffer_seq = pi->buffer_seq_srej; 3281 chan->buffer_seq = chan->buffer_seq_srej;
3215 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3282 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3216 l2cap_send_ack(pi); 3283 l2cap_send_ack(chan);
3217 BT_DBG("sk %p, Exit SREJ_SENT", sk); 3284 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3218 } 3285 }
3219 } else { 3286 } else {
3220 struct srej_list *l; 3287 struct srej_list *l;
3221 3288
3222 /* duplicated tx_seq */ 3289 /* duplicated tx_seq */
3223 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0) 3290 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3224 goto drop; 3291 goto drop;
3225 3292
3226 list_for_each_entry(l, SREJ_LIST(sk), list) { 3293 list_for_each_entry(l, &chan->srej_l, list) {
3227 if (l->tx_seq == tx_seq) { 3294 if (l->tx_seq == tx_seq) {
3228 l2cap_resend_srejframe(sk, tx_seq); 3295 l2cap_resend_srejframe(chan, tx_seq);
3229 return 0; 3296 return 0;
3230 } 3297 }
3231 } 3298 }
3232 l2cap_send_srejframe(sk, tx_seq); 3299 l2cap_send_srejframe(chan, tx_seq);
3233 } 3300 }
3234 } else { 3301 } else {
3235 expected_tx_seq_offset = 3302 expected_tx_seq_offset =
3236 (pi->expected_tx_seq - pi->buffer_seq) % 64; 3303 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3237 if (expected_tx_seq_offset < 0) 3304 if (expected_tx_seq_offset < 0)
3238 expected_tx_seq_offset += 64; 3305 expected_tx_seq_offset += 64;
3239 3306
@@ -3241,51 +3308,51 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3241 if (tx_seq_offset < expected_tx_seq_offset) 3308 if (tx_seq_offset < expected_tx_seq_offset)
3242 goto drop; 3309 goto drop;
3243 3310
3244 pi->conn_state |= L2CAP_CONN_SREJ_SENT; 3311 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3245 3312
3246 BT_DBG("sk %p, Enter SREJ", sk); 3313 BT_DBG("chan %p, Enter SREJ", chan);
3247 3314
3248 INIT_LIST_HEAD(SREJ_LIST(sk)); 3315 INIT_LIST_HEAD(&chan->srej_l);
3249 pi->buffer_seq_srej = pi->buffer_seq; 3316 chan->buffer_seq_srej = chan->buffer_seq;
3250 3317
3251 __skb_queue_head_init(SREJ_QUEUE(sk)); 3318 __skb_queue_head_init(&chan->srej_q);
3252 __skb_queue_head_init(BUSY_QUEUE(sk)); 3319 __skb_queue_head_init(&chan->busy_q);
3253 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar); 3320 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3254 3321
3255 pi->conn_state |= L2CAP_CONN_SEND_PBIT; 3322 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3256 3323
3257 l2cap_send_srejframe(sk, tx_seq); 3324 l2cap_send_srejframe(chan, tx_seq);
3258 3325
3259 del_timer(&pi->ack_timer); 3326 del_timer(&chan->ack_timer);
3260 } 3327 }
3261 return 0; 3328 return 0;
3262 3329
3263expected: 3330expected:
3264 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 3331 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3265 3332
3266 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 3333 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3267 bt_cb(skb)->tx_seq = tx_seq; 3334 bt_cb(skb)->tx_seq = tx_seq;
3268 bt_cb(skb)->sar = sar; 3335 bt_cb(skb)->sar = sar;
3269 __skb_queue_tail(SREJ_QUEUE(sk), skb); 3336 __skb_queue_tail(&chan->srej_q, skb);
3270 return 0; 3337 return 0;
3271 } 3338 }
3272 3339
3273 err = l2cap_push_rx_skb(sk, skb, rx_control); 3340 err = l2cap_push_rx_skb(chan, skb, rx_control);
3274 if (err < 0) 3341 if (err < 0)
3275 return 0; 3342 return 0;
3276 3343
3277 if (rx_control & L2CAP_CTRL_FINAL) { 3344 if (rx_control & L2CAP_CTRL_FINAL) {
3278 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 3345 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3279 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 3346 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3280 else 3347 else
3281 l2cap_retransmit_frames(sk); 3348 l2cap_retransmit_frames(chan);
3282 } 3349 }
3283 3350
3284 __mod_ack_timer(); 3351 __mod_ack_timer();
3285 3352
3286 pi->num_acked = (pi->num_acked + 1) % num_to_ack; 3353 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3287 if (pi->num_acked == num_to_ack - 1) 3354 if (chan->num_acked == num_to_ack - 1)
3288 l2cap_send_ack(pi); 3355 l2cap_send_ack(chan);
3289 3356
3290 return 0; 3357 return 0;
3291 3358
@@ -3294,165 +3361,160 @@ drop:
3294 return 0; 3361 return 0;
3295} 3362}
3296 3363
3297static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control) 3364static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3298{ 3365{
3299 struct l2cap_pinfo *pi = l2cap_pi(sk); 3366 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3300
3301 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3302 rx_control); 3367 rx_control);
3303 3368
3304 pi->expected_ack_seq = __get_reqseq(rx_control); 3369 chan->expected_ack_seq = __get_reqseq(rx_control);
3305 l2cap_drop_acked_frames(sk); 3370 l2cap_drop_acked_frames(chan);
3306 3371
3307 if (rx_control & L2CAP_CTRL_POLL) { 3372 if (rx_control & L2CAP_CTRL_POLL) {
3308 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3373 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3309 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 3374 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3310 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3375 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3311 (pi->unacked_frames > 0)) 3376 (chan->unacked_frames > 0))
3312 __mod_retrans_timer(); 3377 __mod_retrans_timer();
3313 3378
3314 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3379 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3315 l2cap_send_srejtail(sk); 3380 l2cap_send_srejtail(chan);
3316 } else { 3381 } else {
3317 l2cap_send_i_or_rr_or_rnr(sk); 3382 l2cap_send_i_or_rr_or_rnr(chan);
3318 } 3383 }
3319 3384
3320 } else if (rx_control & L2CAP_CTRL_FINAL) { 3385 } else if (rx_control & L2CAP_CTRL_FINAL) {
3321 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3386 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3322 3387
3323 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 3388 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3324 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 3389 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3325 else 3390 else
3326 l2cap_retransmit_frames(sk); 3391 l2cap_retransmit_frames(chan);
3327 3392
3328 } else { 3393 } else {
3329 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3394 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3330 (pi->unacked_frames > 0)) 3395 (chan->unacked_frames > 0))
3331 __mod_retrans_timer(); 3396 __mod_retrans_timer();
3332 3397
3333 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3398 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3334 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) 3399 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3335 l2cap_send_ack(pi); 3400 l2cap_send_ack(chan);
3336 else 3401 else
3337 l2cap_ertm_send(sk); 3402 l2cap_ertm_send(chan);
3338 } 3403 }
3339} 3404}
3340 3405
3341static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control) 3406static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3342{ 3407{
3343 struct l2cap_pinfo *pi = l2cap_pi(sk);
3344 u8 tx_seq = __get_reqseq(rx_control); 3408 u8 tx_seq = __get_reqseq(rx_control);
3345 3409
3346 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3410 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3347 3411
3348 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3412 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3349 3413
3350 pi->expected_ack_seq = tx_seq; 3414 chan->expected_ack_seq = tx_seq;
3351 l2cap_drop_acked_frames(sk); 3415 l2cap_drop_acked_frames(chan);
3352 3416
3353 if (rx_control & L2CAP_CTRL_FINAL) { 3417 if (rx_control & L2CAP_CTRL_FINAL) {
3354 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 3418 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3355 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 3419 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3356 else 3420 else
3357 l2cap_retransmit_frames(sk); 3421 l2cap_retransmit_frames(chan);
3358 } else { 3422 } else {
3359 l2cap_retransmit_frames(sk); 3423 l2cap_retransmit_frames(chan);
3360 3424
3361 if (pi->conn_state & L2CAP_CONN_WAIT_F) 3425 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3362 pi->conn_state |= L2CAP_CONN_REJ_ACT; 3426 chan->conn_state |= L2CAP_CONN_REJ_ACT;
3363 } 3427 }
3364} 3428}
3365static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control) 3429static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3366{ 3430{
3367 struct l2cap_pinfo *pi = l2cap_pi(sk);
3368 u8 tx_seq = __get_reqseq(rx_control); 3431 u8 tx_seq = __get_reqseq(rx_control);
3369 3432
3370 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3433 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3371 3434
3372 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3435 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3373 3436
3374 if (rx_control & L2CAP_CTRL_POLL) { 3437 if (rx_control & L2CAP_CTRL_POLL) {
3375 pi->expected_ack_seq = tx_seq; 3438 chan->expected_ack_seq = tx_seq;
3376 l2cap_drop_acked_frames(sk); 3439 l2cap_drop_acked_frames(chan);
3377 3440
3378 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3441 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3379 l2cap_retransmit_one_frame(sk, tx_seq); 3442 l2cap_retransmit_one_frame(chan, tx_seq);
3380 3443
3381 l2cap_ertm_send(sk); 3444 l2cap_ertm_send(chan);
3382 3445
3383 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 3446 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3384 pi->srej_save_reqseq = tx_seq; 3447 chan->srej_save_reqseq = tx_seq;
3385 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 3448 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3386 } 3449 }
3387 } else if (rx_control & L2CAP_CTRL_FINAL) { 3450 } else if (rx_control & L2CAP_CTRL_FINAL) {
3388 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && 3451 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3389 pi->srej_save_reqseq == tx_seq) 3452 chan->srej_save_reqseq == tx_seq)
3390 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT; 3453 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3391 else 3454 else
3392 l2cap_retransmit_one_frame(sk, tx_seq); 3455 l2cap_retransmit_one_frame(chan, tx_seq);
3393 } else { 3456 } else {
3394 l2cap_retransmit_one_frame(sk, tx_seq); 3457 l2cap_retransmit_one_frame(chan, tx_seq);
3395 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 3458 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3396 pi->srej_save_reqseq = tx_seq; 3459 chan->srej_save_reqseq = tx_seq;
3397 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 3460 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3398 } 3461 }
3399 } 3462 }
3400} 3463}
3401 3464
3402static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control) 3465static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3403{ 3466{
3404 struct l2cap_pinfo *pi = l2cap_pi(sk);
3405 u8 tx_seq = __get_reqseq(rx_control); 3467 u8 tx_seq = __get_reqseq(rx_control);
3406 3468
3407 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3469 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3408 3470
3409 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; 3471 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3410 pi->expected_ack_seq = tx_seq; 3472 chan->expected_ack_seq = tx_seq;
3411 l2cap_drop_acked_frames(sk); 3473 l2cap_drop_acked_frames(chan);
3412 3474
3413 if (rx_control & L2CAP_CTRL_POLL) 3475 if (rx_control & L2CAP_CTRL_POLL)
3414 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3476 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3415 3477
3416 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) { 3478 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3417 del_timer(&pi->retrans_timer); 3479 del_timer(&chan->retrans_timer);
3418 if (rx_control & L2CAP_CTRL_POLL) 3480 if (rx_control & L2CAP_CTRL_POLL)
3419 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL); 3481 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3420 return; 3482 return;
3421 } 3483 }
3422 3484
3423 if (rx_control & L2CAP_CTRL_POLL) 3485 if (rx_control & L2CAP_CTRL_POLL)
3424 l2cap_send_srejtail(sk); 3486 l2cap_send_srejtail(chan);
3425 else 3487 else
3426 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY); 3488 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3427} 3489}
3428 3490
3429static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 3491static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3430{ 3492{
3431 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 3493 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3432 3494
3433 if (L2CAP_CTRL_FINAL & rx_control && 3495 if (L2CAP_CTRL_FINAL & rx_control &&
3434 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) { 3496 chan->conn_state & L2CAP_CONN_WAIT_F) {
3435 del_timer(&l2cap_pi(sk)->monitor_timer); 3497 del_timer(&chan->monitor_timer);
3436 if (l2cap_pi(sk)->unacked_frames > 0) 3498 if (chan->unacked_frames > 0)
3437 __mod_retrans_timer(); 3499 __mod_retrans_timer();
3438 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F; 3500 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3439 } 3501 }
3440 3502
3441 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 3503 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3442 case L2CAP_SUPER_RCV_READY: 3504 case L2CAP_SUPER_RCV_READY:
3443 l2cap_data_channel_rrframe(sk, rx_control); 3505 l2cap_data_channel_rrframe(chan, rx_control);
3444 break; 3506 break;
3445 3507
3446 case L2CAP_SUPER_REJECT: 3508 case L2CAP_SUPER_REJECT:
3447 l2cap_data_channel_rejframe(sk, rx_control); 3509 l2cap_data_channel_rejframe(chan, rx_control);
3448 break; 3510 break;
3449 3511
3450 case L2CAP_SUPER_SELECT_REJECT: 3512 case L2CAP_SUPER_SELECT_REJECT:
3451 l2cap_data_channel_srejframe(sk, rx_control); 3513 l2cap_data_channel_srejframe(chan, rx_control);
3452 break; 3514 break;
3453 3515
3454 case L2CAP_SUPER_RCV_NOT_READY: 3516 case L2CAP_SUPER_RCV_NOT_READY:
3455 l2cap_data_channel_rnrframe(sk, rx_control); 3517 l2cap_data_channel_rnrframe(chan, rx_control);
3456 break; 3518 break;
3457 } 3519 }
3458 3520
@@ -3462,7 +3524,7 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
3462 3524
3463static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 3525static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3464{ 3526{
3465 struct l2cap_pinfo *pi = l2cap_pi(sk); 3527 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3466 u16 control; 3528 u16 control;
3467 u8 req_seq; 3529 u8 req_seq;
3468 int len, next_tx_seq_offset, req_seq_offset; 3530 int len, next_tx_seq_offset, req_seq_offset;
@@ -3476,51 +3538,51 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3476 * Receiver will miss it and start proper recovery 3538 * Receiver will miss it and start proper recovery
3477 * procedures and ask retransmission. 3539 * procedures and ask retransmission.
3478 */ 3540 */
3479 if (l2cap_check_fcs(pi, skb)) 3541 if (l2cap_check_fcs(chan, skb))
3480 goto drop; 3542 goto drop;
3481 3543
3482 if (__is_sar_start(control) && __is_iframe(control)) 3544 if (__is_sar_start(control) && __is_iframe(control))
3483 len -= 2; 3545 len -= 2;
3484 3546
3485 if (pi->fcs == L2CAP_FCS_CRC16) 3547 if (chan->fcs == L2CAP_FCS_CRC16)
3486 len -= 2; 3548 len -= 2;
3487 3549
3488 if (len > pi->mps) { 3550 if (len > chan->mps) {
3489 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3551 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3490 goto drop; 3552 goto drop;
3491 } 3553 }
3492 3554
3493 req_seq = __get_reqseq(control); 3555 req_seq = __get_reqseq(control);
3494 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64; 3556 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3495 if (req_seq_offset < 0) 3557 if (req_seq_offset < 0)
3496 req_seq_offset += 64; 3558 req_seq_offset += 64;
3497 3559
3498 next_tx_seq_offset = 3560 next_tx_seq_offset =
3499 (pi->next_tx_seq - pi->expected_ack_seq) % 64; 3561 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3500 if (next_tx_seq_offset < 0) 3562 if (next_tx_seq_offset < 0)
3501 next_tx_seq_offset += 64; 3563 next_tx_seq_offset += 64;
3502 3564
3503 /* check for invalid req-seq */ 3565 /* check for invalid req-seq */
3504 if (req_seq_offset > next_tx_seq_offset) { 3566 if (req_seq_offset > next_tx_seq_offset) {
3505 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3567 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3506 goto drop; 3568 goto drop;
3507 } 3569 }
3508 3570
3509 if (__is_iframe(control)) { 3571 if (__is_iframe(control)) {
3510 if (len < 0) { 3572 if (len < 0) {
3511 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3573 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3512 goto drop; 3574 goto drop;
3513 } 3575 }
3514 3576
3515 l2cap_data_channel_iframe(sk, control, skb); 3577 l2cap_data_channel_iframe(chan, control, skb);
3516 } else { 3578 } else {
3517 if (len != 0) { 3579 if (len != 0) {
3518 BT_ERR("%d", len); 3580 BT_ERR("%d", len);
3519 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3581 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3520 goto drop; 3582 goto drop;
3521 } 3583 }
3522 3584
3523 l2cap_data_channel_sframe(sk, control, skb); 3585 l2cap_data_channel_sframe(chan, control, skb);
3524 } 3586 }
3525 3587
3526 return 0; 3588 return 0;
@@ -3532,33 +3594,35 @@ drop:
3532 3594
3533static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 3595static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3534{ 3596{
3535 struct sock *sk; 3597 struct l2cap_chan *chan;
3598 struct sock *sk = NULL;
3536 struct l2cap_pinfo *pi; 3599 struct l2cap_pinfo *pi;
3537 u16 control; 3600 u16 control;
3538 u8 tx_seq; 3601 u8 tx_seq;
3539 int len; 3602 int len;
3540 3603
3541 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 3604 chan = l2cap_get_chan_by_scid(conn, cid);
3542 if (!sk) { 3605 if (!chan) {
3543 BT_DBG("unknown cid 0x%4.4x", cid); 3606 BT_DBG("unknown cid 0x%4.4x", cid);
3544 goto drop; 3607 goto drop;
3545 } 3608 }
3546 3609
3610 sk = chan->sk;
3547 pi = l2cap_pi(sk); 3611 pi = l2cap_pi(sk);
3548 3612
3549 BT_DBG("sk %p, len %d", sk, skb->len); 3613 BT_DBG("chan %p, len %d", chan, skb->len);
3550 3614
3551 if (sk->sk_state != BT_CONNECTED) 3615 if (sk->sk_state != BT_CONNECTED)
3552 goto drop; 3616 goto drop;
3553 3617
3554 switch (pi->mode) { 3618 switch (chan->mode) {
3555 case L2CAP_MODE_BASIC: 3619 case L2CAP_MODE_BASIC:
3556 /* If socket recv buffers overflows we drop data here 3620 /* If socket recv buffers overflows we drop data here
3557 * which is *bad* because L2CAP has to be reliable. 3621 * which is *bad* because L2CAP has to be reliable.
3558 * But we don't have any other choice. L2CAP doesn't 3622 * But we don't have any other choice. L2CAP doesn't
3559 * provide flow control mechanism. */ 3623 * provide flow control mechanism. */
3560 3624
3561 if (pi->imtu < skb->len) 3625 if (chan->imtu < skb->len)
3562 goto drop; 3626 goto drop;
3563 3627
3564 if (!sock_queue_rcv_skb(sk, skb)) 3628 if (!sock_queue_rcv_skb(sk, skb))
@@ -3580,31 +3644,31 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3580 skb_pull(skb, 2); 3644 skb_pull(skb, 2);
3581 len = skb->len; 3645 len = skb->len;
3582 3646
3583 if (l2cap_check_fcs(pi, skb)) 3647 if (l2cap_check_fcs(chan, skb))
3584 goto drop; 3648 goto drop;
3585 3649
3586 if (__is_sar_start(control)) 3650 if (__is_sar_start(control))
3587 len -= 2; 3651 len -= 2;
3588 3652
3589 if (pi->fcs == L2CAP_FCS_CRC16) 3653 if (chan->fcs == L2CAP_FCS_CRC16)
3590 len -= 2; 3654 len -= 2;
3591 3655
3592 if (len > pi->mps || len < 0 || __is_sframe(control)) 3656 if (len > chan->mps || len < 0 || __is_sframe(control))
3593 goto drop; 3657 goto drop;
3594 3658
3595 tx_seq = __get_txseq(control); 3659 tx_seq = __get_txseq(control);
3596 3660
3597 if (pi->expected_tx_seq == tx_seq) 3661 if (chan->expected_tx_seq == tx_seq)
3598 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 3662 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3599 else 3663 else
3600 pi->expected_tx_seq = (tx_seq + 1) % 64; 3664 chan->expected_tx_seq = (tx_seq + 1) % 64;
3601 3665
3602 l2cap_streaming_reassembly_sdu(sk, skb, control); 3666 l2cap_streaming_reassembly_sdu(chan, skb, control);
3603 3667
3604 goto done; 3668 goto done;
3605 3669
3606 default: 3670 default:
3607 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode); 3671 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3608 break; 3672 break;
3609 } 3673 }
3610 3674
@@ -3633,7 +3697,37 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
3633 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3697 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3634 goto drop; 3698 goto drop;
3635 3699
3636 if (l2cap_pi(sk)->imtu < skb->len) 3700 if (l2cap_pi(sk)->chan->imtu < skb->len)
3701 goto drop;
3702
3703 if (!sock_queue_rcv_skb(sk, skb))
3704 goto done;
3705
3706drop:
3707 kfree_skb(skb);
3708
3709done:
3710 if (sk)
3711 bh_unlock_sock(sk);
3712 return 0;
3713}
3714
3715static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3716{
3717 struct sock *sk;
3718
3719 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3720 if (!sk)
3721 goto drop;
3722
3723 bh_lock_sock(sk);
3724
3725 BT_DBG("sk %p, len %d", sk, skb->len);
3726
3727 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3728 goto drop;
3729
3730 if (l2cap_pi(sk)->chan->imtu < skb->len)
3637 goto drop; 3731 goto drop;
3638 3732
3639 if (!sock_queue_rcv_skb(sk, skb)) 3733 if (!sock_queue_rcv_skb(sk, skb))
@@ -3677,6 +3771,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3677 l2cap_conless_channel(conn, psm, skb); 3771 l2cap_conless_channel(conn, psm, skb);
3678 break; 3772 break;
3679 3773
3774 case L2CAP_CID_LE_DATA:
3775 l2cap_att_channel(conn, cid, skb);
3776 break;
3777
3680 default: 3778 default:
3681 l2cap_data_channel(conn, cid, skb); 3779 l2cap_data_channel(conn, cid, skb);
3682 break; 3780 break;
@@ -3699,17 +3797,19 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3699 /* Find listening sockets and check their link_mode */ 3797 /* Find listening sockets and check their link_mode */
3700 read_lock(&l2cap_sk_list.lock); 3798 read_lock(&l2cap_sk_list.lock);
3701 sk_for_each(sk, node, &l2cap_sk_list.head) { 3799 sk_for_each(sk, node, &l2cap_sk_list.head) {
3800 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3801
3702 if (sk->sk_state != BT_LISTEN) 3802 if (sk->sk_state != BT_LISTEN)
3703 continue; 3803 continue;
3704 3804
3705 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 3805 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3706 lm1 |= HCI_LM_ACCEPT; 3806 lm1 |= HCI_LM_ACCEPT;
3707 if (l2cap_pi(sk)->role_switch) 3807 if (chan->role_switch)
3708 lm1 |= HCI_LM_MASTER; 3808 lm1 |= HCI_LM_MASTER;
3709 exact++; 3809 exact++;
3710 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 3810 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3711 lm2 |= HCI_LM_ACCEPT; 3811 lm2 |= HCI_LM_ACCEPT;
3712 if (l2cap_pi(sk)->role_switch) 3812 if (chan->role_switch)
3713 lm2 |= HCI_LM_MASTER; 3813 lm2 |= HCI_LM_MASTER;
3714 } 3814 }
3715 } 3815 }
@@ -3761,49 +3861,50 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3761 return 0; 3861 return 0;
3762} 3862}
3763 3863
3764static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) 3864static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3765{ 3865{
3866 struct sock *sk = chan->sk;
3867
3766 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) 3868 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3767 return; 3869 return;
3768 3870
3769 if (encrypt == 0x00) { 3871 if (encrypt == 0x00) {
3770 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) { 3872 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3771 l2cap_sock_clear_timer(sk); 3873 l2cap_sock_clear_timer(sk);
3772 l2cap_sock_set_timer(sk, HZ * 5); 3874 l2cap_sock_set_timer(sk, HZ * 5);
3773 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 3875 } else if (chan->sec_level == BT_SECURITY_HIGH)
3774 __l2cap_sock_close(sk, ECONNREFUSED); 3876 __l2cap_sock_close(sk, ECONNREFUSED);
3775 } else { 3877 } else {
3776 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) 3878 if (chan->sec_level == BT_SECURITY_MEDIUM)
3777 l2cap_sock_clear_timer(sk); 3879 l2cap_sock_clear_timer(sk);
3778 } 3880 }
3779} 3881}
3780 3882
3781static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) 3883static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3782{ 3884{
3783 struct l2cap_chan_list *l;
3784 struct l2cap_conn *conn = hcon->l2cap_data; 3885 struct l2cap_conn *conn = hcon->l2cap_data;
3785 struct sock *sk; 3886 struct l2cap_chan *chan;
3786 3887
3787 if (!conn) 3888 if (!conn)
3788 return 0; 3889 return 0;
3789 3890
3790 l = &conn->chan_list;
3791
3792 BT_DBG("conn %p", conn); 3891 BT_DBG("conn %p", conn);
3793 3892
3794 read_lock(&l->lock); 3893 read_lock(&conn->chan_lock);
3894
3895 list_for_each_entry(chan, &conn->chan_l, list) {
3896 struct sock *sk = chan->sk;
3795 3897
3796 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3797 bh_lock_sock(sk); 3898 bh_lock_sock(sk);
3798 3899
3799 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) { 3900 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
3800 bh_unlock_sock(sk); 3901 bh_unlock_sock(sk);
3801 continue; 3902 continue;
3802 } 3903 }
3803 3904
3804 if (!status && (sk->sk_state == BT_CONNECTED || 3905 if (!status && (sk->sk_state == BT_CONNECTED ||
3805 sk->sk_state == BT_CONFIG)) { 3906 sk->sk_state == BT_CONFIG)) {
3806 l2cap_check_encryption(sk, encrypt); 3907 l2cap_check_encryption(chan, encrypt);
3807 bh_unlock_sock(sk); 3908 bh_unlock_sock(sk);
3808 continue; 3909 continue;
3809 } 3910 }
@@ -3811,13 +3912,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3811 if (sk->sk_state == BT_CONNECT) { 3912 if (sk->sk_state == BT_CONNECT) {
3812 if (!status) { 3913 if (!status) {
3813 struct l2cap_conn_req req; 3914 struct l2cap_conn_req req;
3814 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 3915 req.scid = cpu_to_le16(chan->scid);
3815 req.psm = l2cap_pi(sk)->psm; 3916 req.psm = chan->psm;
3816 3917
3817 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 3918 chan->ident = l2cap_get_ident(conn);
3818 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 3919 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
3819 3920
3820 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 3921 l2cap_send_cmd(conn, chan->ident,
3821 L2CAP_CONN_REQ, sizeof(req), &req); 3922 L2CAP_CONN_REQ, sizeof(req), &req);
3822 } else { 3923 } else {
3823 l2cap_sock_clear_timer(sk); 3924 l2cap_sock_clear_timer(sk);
@@ -3836,18 +3937,18 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3836 result = L2CAP_CR_SEC_BLOCK; 3937 result = L2CAP_CR_SEC_BLOCK;
3837 } 3938 }
3838 3939
3839 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 3940 rsp.scid = cpu_to_le16(chan->dcid);
3840 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 3941 rsp.dcid = cpu_to_le16(chan->scid);
3841 rsp.result = cpu_to_le16(result); 3942 rsp.result = cpu_to_le16(result);
3842 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 3943 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3843 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 3944 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3844 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 3945 sizeof(rsp), &rsp);
3845 } 3946 }
3846 3947
3847 bh_unlock_sock(sk); 3948 bh_unlock_sock(sk);
3848 } 3949 }
3849 3950
3850 read_unlock(&l->lock); 3951 read_unlock(&conn->chan_lock);
3851 3952
3852 return 0; 3953 return 0;
3853} 3954}
@@ -3866,7 +3967,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
3866 3967
3867 if (!(flags & ACL_CONT)) { 3968 if (!(flags & ACL_CONT)) {
3868 struct l2cap_hdr *hdr; 3969 struct l2cap_hdr *hdr;
3869 struct sock *sk; 3970 struct l2cap_chan *chan;
3870 u16 cid; 3971 u16 cid;
3871 int len; 3972 int len;
3872 3973
@@ -3904,18 +4005,21 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
3904 goto drop; 4005 goto drop;
3905 } 4006 }
3906 4007
3907 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 4008 chan = l2cap_get_chan_by_scid(conn, cid);
3908 4009
3909 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) { 4010 if (chan && chan->sk) {
3910 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)", 4011 struct sock *sk = chan->sk;
3911 len, l2cap_pi(sk)->imtu);
3912 bh_unlock_sock(sk);
3913 l2cap_conn_unreliable(conn, ECOMM);
3914 goto drop;
3915 }
3916 4012
3917 if (sk) 4013 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4014 BT_ERR("Frame exceeding recv MTU (len %d, "
4015 "MTU %d)", len,
4016 chan->imtu);
4017 bh_unlock_sock(sk);
4018 l2cap_conn_unreliable(conn, ECOMM);
4019 goto drop;
4020 }
3918 bh_unlock_sock(sk); 4021 bh_unlock_sock(sk);
4022 }
3919 4023
3920 /* Allocate skb for the complete frame (with header) */ 4024 /* Allocate skb for the complete frame (with header) */
3921 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 4025 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
@@ -3969,14 +4073,15 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
3969 4073
3970 sk_for_each(sk, node, &l2cap_sk_list.head) { 4074 sk_for_each(sk, node, &l2cap_sk_list.head) {
3971 struct l2cap_pinfo *pi = l2cap_pi(sk); 4075 struct l2cap_pinfo *pi = l2cap_pi(sk);
4076 struct l2cap_chan *chan = pi->chan;
3972 4077
3973 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 4078 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3974 batostr(&bt_sk(sk)->src), 4079 batostr(&bt_sk(sk)->src),
3975 batostr(&bt_sk(sk)->dst), 4080 batostr(&bt_sk(sk)->dst),
3976 sk->sk_state, __le16_to_cpu(pi->psm), 4081 sk->sk_state, __le16_to_cpu(chan->psm),
3977 pi->scid, pi->dcid, 4082 chan->scid, chan->dcid,
3978 pi->imtu, pi->omtu, pi->sec_level, 4083 chan->imtu, chan->omtu, chan->sec_level,
3979 pi->mode); 4084 chan->mode);
3980 } 4085 }
3981 4086
3982 read_unlock_bh(&l2cap_sk_list.lock); 4087 read_unlock_bh(&l2cap_sk_list.lock);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 299fe56a9668..7c4a9ae9b3ce 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -30,6 +30,8 @@
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 31#include <net/bluetooth/l2cap.h>
32 32
33static const struct proto_ops l2cap_sock_ops;
34
33/* ---- L2CAP timers ---- */ 35/* ---- L2CAP timers ---- */
34static void l2cap_sock_timeout(unsigned long arg) 36static void l2cap_sock_timeout(unsigned long arg)
35{ 37{
@@ -51,7 +53,7 @@ static void l2cap_sock_timeout(unsigned long arg)
51 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) 53 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
52 reason = ECONNREFUSED; 54 reason = ECONNREFUSED;
53 else if (sk->sk_state == BT_CONNECT && 55 else if (sk->sk_state == BT_CONNECT &&
54 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) 56 l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
55 reason = ECONNREFUSED; 57 reason = ECONNREFUSED;
56 else 58 else
57 reason = ETIMEDOUT; 59 reason = ETIMEDOUT;
@@ -80,9 +82,13 @@ static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
80{ 82{
81 struct sock *sk; 83 struct sock *sk;
82 struct hlist_node *node; 84 struct hlist_node *node;
83 sk_for_each(sk, node, &l2cap_sk_list.head) 85 sk_for_each(sk, node, &l2cap_sk_list.head) {
84 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src)) 86 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
87
88 if (chan->sport == psm && !bacmp(&bt_sk(sk)->src, src))
85 goto found; 89 goto found;
90 }
91
86 sk = NULL; 92 sk = NULL;
87found: 93found:
88 return sk; 94 return sk;
@@ -91,6 +97,7 @@ found:
91static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) 97static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
92{ 98{
93 struct sock *sk = sock->sk; 99 struct sock *sk = sock->sk;
100 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
94 struct sockaddr_l2 la; 101 struct sockaddr_l2 la;
95 int len, err = 0; 102 int len, err = 0;
96 103
@@ -136,17 +143,17 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
136 } else { 143 } else {
137 /* Save source address */ 144 /* Save source address */
138 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 145 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
139 l2cap_pi(sk)->psm = la.l2_psm; 146 chan->psm = la.l2_psm;
140 l2cap_pi(sk)->sport = la.l2_psm; 147 chan->sport = la.l2_psm;
141 sk->sk_state = BT_BOUND; 148 sk->sk_state = BT_BOUND;
142 149
143 if (__le16_to_cpu(la.l2_psm) == 0x0001 || 150 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
144 __le16_to_cpu(la.l2_psm) == 0x0003) 151 __le16_to_cpu(la.l2_psm) == 0x0003)
145 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 152 chan->sec_level = BT_SECURITY_SDP;
146 } 153 }
147 154
148 if (la.l2_cid) 155 if (la.l2_cid)
149 l2cap_pi(sk)->scid = la.l2_cid; 156 chan->scid = la.l2_cid;
150 157
151 write_unlock_bh(&l2cap_sk_list.lock); 158 write_unlock_bh(&l2cap_sk_list.lock);
152 159
@@ -158,6 +165,7 @@ done:
158static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 165static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
159{ 166{
160 struct sock *sk = sock->sk; 167 struct sock *sk = sock->sk;
168 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
161 struct sockaddr_l2 la; 169 struct sockaddr_l2 la;
162 int len, err = 0; 170 int len, err = 0;
163 171
@@ -182,7 +190,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
182 goto done; 190 goto done;
183 } 191 }
184 192
185 switch (l2cap_pi(sk)->mode) { 193 switch (chan->mode) {
186 case L2CAP_MODE_BASIC: 194 case L2CAP_MODE_BASIC:
187 break; 195 break;
188 case L2CAP_MODE_ERTM: 196 case L2CAP_MODE_ERTM:
@@ -226,10 +234,10 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
226 234
227 /* Set destination address and psm */ 235 /* Set destination address and psm */
228 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); 236 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
229 l2cap_pi(sk)->psm = la.l2_psm; 237 chan->psm = la.l2_psm;
230 l2cap_pi(sk)->dcid = la.l2_cid; 238 chan->dcid = la.l2_cid;
231 239
232 err = l2cap_do_connect(sk); 240 err = l2cap_chan_connect(l2cap_pi(sk)->chan);
233 if (err) 241 if (err)
234 goto done; 242 goto done;
235 243
@@ -244,6 +252,7 @@ done:
244static int l2cap_sock_listen(struct socket *sock, int backlog) 252static int l2cap_sock_listen(struct socket *sock, int backlog)
245{ 253{
246 struct sock *sk = sock->sk; 254 struct sock *sk = sock->sk;
255 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
247 int err = 0; 256 int err = 0;
248 257
249 BT_DBG("sk %p backlog %d", sk, backlog); 258 BT_DBG("sk %p backlog %d", sk, backlog);
@@ -256,7 +265,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
256 goto done; 265 goto done;
257 } 266 }
258 267
259 switch (l2cap_pi(sk)->mode) { 268 switch (chan->mode) {
260 case L2CAP_MODE_BASIC: 269 case L2CAP_MODE_BASIC:
261 break; 270 break;
262 case L2CAP_MODE_ERTM: 271 case L2CAP_MODE_ERTM:
@@ -269,7 +278,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
269 goto done; 278 goto done;
270 } 279 }
271 280
272 if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) { 281 if (!chan->psm && !chan->scid) {
273 bdaddr_t *src = &bt_sk(sk)->src; 282 bdaddr_t *src = &bt_sk(sk)->src;
274 u16 psm; 283 u16 psm;
275 284
@@ -279,8 +288,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
279 288
280 for (psm = 0x1001; psm < 0x1100; psm += 2) 289 for (psm = 0x1001; psm < 0x1100; psm += 2)
281 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) { 290 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
282 l2cap_pi(sk)->psm = cpu_to_le16(psm); 291 chan->psm = cpu_to_le16(psm);
283 l2cap_pi(sk)->sport = cpu_to_le16(psm); 292 chan->sport = cpu_to_le16(psm);
284 err = 0; 293 err = 0;
285 break; 294 break;
286 } 295 }
@@ -360,6 +369,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
360{ 369{
361 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; 370 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
362 struct sock *sk = sock->sk; 371 struct sock *sk = sock->sk;
372 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
363 373
364 BT_DBG("sock %p, sk %p", sock, sk); 374 BT_DBG("sock %p, sk %p", sock, sk);
365 375
@@ -367,13 +377,13 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
367 *len = sizeof(struct sockaddr_l2); 377 *len = sizeof(struct sockaddr_l2);
368 378
369 if (peer) { 379 if (peer) {
370 la->l2_psm = l2cap_pi(sk)->psm; 380 la->l2_psm = chan->psm;
371 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); 381 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
372 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid); 382 la->l2_cid = cpu_to_le16(chan->dcid);
373 } else { 383 } else {
374 la->l2_psm = l2cap_pi(sk)->sport; 384 la->l2_psm = chan->sport;
375 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); 385 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
376 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid); 386 la->l2_cid = cpu_to_le16(chan->scid);
377 } 387 }
378 388
379 return 0; 389 return 0;
@@ -382,6 +392,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
382static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 392static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
383{ 393{
384 struct sock *sk = sock->sk; 394 struct sock *sk = sock->sk;
395 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
385 struct l2cap_options opts; 396 struct l2cap_options opts;
386 struct l2cap_conninfo cinfo; 397 struct l2cap_conninfo cinfo;
387 int len, err = 0; 398 int len, err = 0;
@@ -397,13 +408,13 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
397 switch (optname) { 408 switch (optname) {
398 case L2CAP_OPTIONS: 409 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts)); 410 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu; 411 opts.imtu = chan->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu; 412 opts.omtu = chan->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to; 413 opts.flush_to = chan->flush_to;
403 opts.mode = l2cap_pi(sk)->mode; 414 opts.mode = chan->mode;
404 opts.fcs = l2cap_pi(sk)->fcs; 415 opts.fcs = chan->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx; 416 opts.max_tx = chan->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win; 417 opts.txwin_size = (__u16)chan->tx_win;
407 418
408 len = min_t(unsigned int, len, sizeof(opts)); 419 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len)) 420 if (copy_to_user(optval, (char *) &opts, len))
@@ -412,7 +423,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
412 break; 423 break;
413 424
414 case L2CAP_LM: 425 case L2CAP_LM:
415 switch (l2cap_pi(sk)->sec_level) { 426 switch (chan->sec_level) {
416 case BT_SECURITY_LOW: 427 case BT_SECURITY_LOW:
417 opt = L2CAP_LM_AUTH; 428 opt = L2CAP_LM_AUTH;
418 break; 429 break;
@@ -428,10 +439,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
428 break; 439 break;
429 } 440 }
430 441
431 if (l2cap_pi(sk)->role_switch) 442 if (chan->role_switch)
432 opt |= L2CAP_LM_MASTER; 443 opt |= L2CAP_LM_MASTER;
433 444
434 if (l2cap_pi(sk)->force_reliable) 445 if (chan->force_reliable)
435 opt |= L2CAP_LM_RELIABLE; 446 opt |= L2CAP_LM_RELIABLE;
436 447
437 if (put_user(opt, (u32 __user *) optval)) 448 if (put_user(opt, (u32 __user *) optval))
@@ -446,8 +457,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
446 break; 457 break;
447 } 458 }
448 459
449 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle; 460 cinfo.hci_handle = chan->conn->hcon->handle;
450 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3); 461 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
451 462
452 len = min_t(unsigned int, len, sizeof(cinfo)); 463 len = min_t(unsigned int, len, sizeof(cinfo));
453 if (copy_to_user(optval, (char *) &cinfo, len)) 464 if (copy_to_user(optval, (char *) &cinfo, len))
@@ -467,6 +478,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
467static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 478static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
468{ 479{
469 struct sock *sk = sock->sk; 480 struct sock *sk = sock->sk;
481 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
470 struct bt_security sec; 482 struct bt_security sec;
471 int len, err = 0; 483 int len, err = 0;
472 484
@@ -491,7 +503,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
491 break; 503 break;
492 } 504 }
493 505
494 sec.level = l2cap_pi(sk)->sec_level; 506 sec.level = chan->sec_level;
495 507
496 len = min_t(unsigned int, len, sizeof(sec)); 508 len = min_t(unsigned int, len, sizeof(sec));
497 if (copy_to_user(optval, (char *) &sec, len)) 509 if (copy_to_user(optval, (char *) &sec, len))
@@ -511,7 +523,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
511 break; 523 break;
512 524
513 case BT_FLUSHABLE: 525 case BT_FLUSHABLE:
514 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval)) 526 if (put_user(chan->flushable, (u32 __user *) optval))
515 err = -EFAULT; 527 err = -EFAULT;
516 528
517 break; 529 break;
@@ -528,6 +540,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
528static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 540static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
529{ 541{
530 struct sock *sk = sock->sk; 542 struct sock *sk = sock->sk;
543 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
531 struct l2cap_options opts; 544 struct l2cap_options opts;
532 int len, err = 0; 545 int len, err = 0;
533 u32 opt; 546 u32 opt;
@@ -543,13 +556,13 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
543 break; 556 break;
544 } 557 }
545 558
546 opts.imtu = l2cap_pi(sk)->imtu; 559 opts.imtu = chan->imtu;
547 opts.omtu = l2cap_pi(sk)->omtu; 560 opts.omtu = chan->omtu;
548 opts.flush_to = l2cap_pi(sk)->flush_to; 561 opts.flush_to = chan->flush_to;
549 opts.mode = l2cap_pi(sk)->mode; 562 opts.mode = chan->mode;
550 opts.fcs = l2cap_pi(sk)->fcs; 563 opts.fcs = chan->fcs;
551 opts.max_tx = l2cap_pi(sk)->max_tx; 564 opts.max_tx = chan->max_tx;
552 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win; 565 opts.txwin_size = (__u16)chan->tx_win;
553 566
554 len = min_t(unsigned int, sizeof(opts), optlen); 567 len = min_t(unsigned int, sizeof(opts), optlen);
555 if (copy_from_user((char *) &opts, optval, len)) { 568 if (copy_from_user((char *) &opts, optval, len)) {
@@ -562,10 +575,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
562 break; 575 break;
563 } 576 }
564 577
565 l2cap_pi(sk)->mode = opts.mode; 578 chan->mode = opts.mode;
566 switch (l2cap_pi(sk)->mode) { 579 switch (chan->mode) {
567 case L2CAP_MODE_BASIC: 580 case L2CAP_MODE_BASIC:
568 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; 581 chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
569 break; 582 break;
570 case L2CAP_MODE_ERTM: 583 case L2CAP_MODE_ERTM:
571 case L2CAP_MODE_STREAMING: 584 case L2CAP_MODE_STREAMING:
@@ -577,11 +590,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
577 break; 590 break;
578 } 591 }
579 592
580 l2cap_pi(sk)->imtu = opts.imtu; 593 chan->imtu = opts.imtu;
581 l2cap_pi(sk)->omtu = opts.omtu; 594 chan->omtu = opts.omtu;
582 l2cap_pi(sk)->fcs = opts.fcs; 595 chan->fcs = opts.fcs;
583 l2cap_pi(sk)->max_tx = opts.max_tx; 596 chan->max_tx = opts.max_tx;
584 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size; 597 chan->tx_win = (__u8)opts.txwin_size;
585 break; 598 break;
586 599
587 case L2CAP_LM: 600 case L2CAP_LM:
@@ -591,14 +604,14 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
591 } 604 }
592 605
593 if (opt & L2CAP_LM_AUTH) 606 if (opt & L2CAP_LM_AUTH)
594 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW; 607 chan->sec_level = BT_SECURITY_LOW;
595 if (opt & L2CAP_LM_ENCRYPT) 608 if (opt & L2CAP_LM_ENCRYPT)
596 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM; 609 chan->sec_level = BT_SECURITY_MEDIUM;
597 if (opt & L2CAP_LM_SECURE) 610 if (opt & L2CAP_LM_SECURE)
598 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH; 611 chan->sec_level = BT_SECURITY_HIGH;
599 612
600 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); 613 chan->role_switch = (opt & L2CAP_LM_MASTER);
601 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); 614 chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
602 break; 615 break;
603 616
604 default: 617 default:
@@ -613,6 +626,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
613static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 626static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
614{ 627{
615 struct sock *sk = sock->sk; 628 struct sock *sk = sock->sk;
629 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
616 struct bt_security sec; 630 struct bt_security sec;
617 int len, err = 0; 631 int len, err = 0;
618 u32 opt; 632 u32 opt;
@@ -649,7 +663,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
649 break; 663 break;
650 } 664 }
651 665
652 l2cap_pi(sk)->sec_level = sec.level; 666 chan->sec_level = sec.level;
653 break; 667 break;
654 668
655 case BT_DEFER_SETUP: 669 case BT_DEFER_SETUP:
@@ -678,7 +692,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
678 } 692 }
679 693
680 if (opt == BT_FLUSHABLE_OFF) { 694 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 695 struct l2cap_conn *conn = chan->conn;
682 /* proceed further only when we have l2cap_conn and 696 /* proceed further only when we have l2cap_conn and
683 No Flush support in the LM */ 697 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { 698 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -687,7 +701,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
687 } 701 }
688 } 702 }
689 703
690 l2cap_pi(sk)->flushable = opt; 704 chan->flushable = opt;
691 break; 705 break;
692 706
693 default: 707 default:
@@ -702,7 +716,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
702static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) 716static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
703{ 717{
704 struct sock *sk = sock->sk; 718 struct sock *sk = sock->sk;
705 struct l2cap_pinfo *pi = l2cap_pi(sk); 719 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
706 struct sk_buff *skb; 720 struct sk_buff *skb;
707 u16 control; 721 u16 control;
708 int err; 722 int err;
@@ -725,74 +739,77 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
725 739
726 /* Connectionless channel */ 740 /* Connectionless channel */
727 if (sk->sk_type == SOCK_DGRAM) { 741 if (sk->sk_type == SOCK_DGRAM) {
728 skb = l2cap_create_connless_pdu(sk, msg, len); 742 skb = l2cap_create_connless_pdu(chan, msg, len);
729 if (IS_ERR(skb)) { 743 if (IS_ERR(skb)) {
730 err = PTR_ERR(skb); 744 err = PTR_ERR(skb);
731 } else { 745 } else {
732 l2cap_do_send(sk, skb); 746 l2cap_do_send(chan, skb);
733 err = len; 747 err = len;
734 } 748 }
735 goto done; 749 goto done;
736 } 750 }
737 751
738 switch (pi->mode) { 752 switch (chan->mode) {
739 case L2CAP_MODE_BASIC: 753 case L2CAP_MODE_BASIC:
740 /* Check outgoing MTU */ 754 /* Check outgoing MTU */
741 if (len > pi->omtu) { 755 if (len > chan->omtu) {
742 err = -EMSGSIZE; 756 err = -EMSGSIZE;
743 goto done; 757 goto done;
744 } 758 }
745 759
746 /* Create a basic PDU */ 760 /* Create a basic PDU */
747 skb = l2cap_create_basic_pdu(sk, msg, len); 761 skb = l2cap_create_basic_pdu(chan, msg, len);
748 if (IS_ERR(skb)) { 762 if (IS_ERR(skb)) {
749 err = PTR_ERR(skb); 763 err = PTR_ERR(skb);
750 goto done; 764 goto done;
751 } 765 }
752 766
753 l2cap_do_send(sk, skb); 767 l2cap_do_send(chan, skb);
754 err = len; 768 err = len;
755 break; 769 break;
756 770
757 case L2CAP_MODE_ERTM: 771 case L2CAP_MODE_ERTM:
758 case L2CAP_MODE_STREAMING: 772 case L2CAP_MODE_STREAMING:
759 /* Entire SDU fits into one PDU */ 773 /* Entire SDU fits into one PDU */
760 if (len <= pi->remote_mps) { 774 if (len <= chan->remote_mps) {
761 control = L2CAP_SDU_UNSEGMENTED; 775 control = L2CAP_SDU_UNSEGMENTED;
762 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); 776 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
777 0);
763 if (IS_ERR(skb)) { 778 if (IS_ERR(skb)) {
764 err = PTR_ERR(skb); 779 err = PTR_ERR(skb);
765 goto done; 780 goto done;
766 } 781 }
767 __skb_queue_tail(TX_QUEUE(sk), skb); 782 __skb_queue_tail(&chan->tx_q, skb);
768 783
769 if (sk->sk_send_head == NULL) 784 if (chan->tx_send_head == NULL)
770 sk->sk_send_head = skb; 785 chan->tx_send_head = skb;
771 786
772 } else { 787 } else {
773 /* Segment SDU into multiples PDUs */ 788 /* Segment SDU into multiples PDUs */
774 err = l2cap_sar_segment_sdu(sk, msg, len); 789 err = l2cap_sar_segment_sdu(chan, msg, len);
775 if (err < 0) 790 if (err < 0)
776 goto done; 791 goto done;
777 } 792 }
778 793
779 if (pi->mode == L2CAP_MODE_STREAMING) { 794 if (chan->mode == L2CAP_MODE_STREAMING) {
780 l2cap_streaming_send(sk); 795 l2cap_streaming_send(chan);
781 } else { 796 err = len;
782 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 797 break;
783 (pi->conn_state & L2CAP_CONN_WAIT_F)) {
784 err = len;
785 break;
786 }
787 err = l2cap_ertm_send(sk);
788 } 798 }
789 799
800 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
801 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
802 err = len;
803 break;
804 }
805 err = l2cap_ertm_send(chan);
806
790 if (err >= 0) 807 if (err >= 0)
791 err = len; 808 err = len;
792 break; 809 break;
793 810
794 default: 811 default:
795 BT_DBG("bad state %1.1x", pi->mode); 812 BT_DBG("bad state %1.1x", chan->mode);
796 err = -EBADFD; 813 err = -EBADFD;
797 } 814 }
798 815
@@ -808,29 +825,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
808 lock_sock(sk); 825 lock_sock(sk);
809 826
810 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 827 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
811 struct l2cap_conn_rsp rsp;
812 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
813 u8 buf[128];
814
815 sk->sk_state = BT_CONFIG; 828 sk->sk_state = BT_CONFIG;
816 829
817 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 830 __l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
818 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
819 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
820 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
821 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
822 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
823
824 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
825 release_sock(sk);
826 return 0;
827 }
828
829 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
830 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
831 l2cap_build_conf_req(sk, buf), buf);
832 l2cap_pi(sk)->num_conf_req++;
833
834 release_sock(sk); 831 release_sock(sk);
835 return 0; 832 return 0;
836 } 833 }
@@ -854,6 +851,8 @@ void l2cap_sock_kill(struct sock *sk)
854 BT_DBG("sk %p state %d", sk, sk->sk_state); 851 BT_DBG("sk %p state %d", sk, sk->sk_state);
855 852
856 /* Kill poor orphan */ 853 /* Kill poor orphan */
854
855 l2cap_chan_free(l2cap_pi(sk)->chan);
857 bt_sock_unlink(&l2cap_sk_list, sk); 856 bt_sock_unlink(&l2cap_sk_list, sk);
858 sock_set_flag(sk, SOCK_DEAD); 857 sock_set_flag(sk, SOCK_DEAD);
859 sock_put(sk); 858 sock_put(sk);
@@ -885,7 +884,8 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
885 884
886void __l2cap_sock_close(struct sock *sk, int reason) 885void __l2cap_sock_close(struct sock *sk, int reason)
887{ 886{
888 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 887 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
888 struct l2cap_conn *conn = chan->conn;
889 889
890 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); 890 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
891 891
@@ -900,9 +900,9 @@ void __l2cap_sock_close(struct sock *sk, int reason)
900 sk->sk_type == SOCK_STREAM) && 900 sk->sk_type == SOCK_STREAM) &&
901 conn->hcon->type == ACL_LINK) { 901 conn->hcon->type == ACL_LINK) {
902 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 902 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
903 l2cap_send_disconn_req(conn, sk, reason); 903 l2cap_send_disconn_req(conn, chan, reason);
904 } else 904 } else
905 l2cap_chan_del(sk, reason); 905 l2cap_chan_del(chan, reason);
906 break; 906 break;
907 907
908 case BT_CONNECT2: 908 case BT_CONNECT2:
@@ -917,20 +917,20 @@ void __l2cap_sock_close(struct sock *sk, int reason)
917 else 917 else
918 result = L2CAP_CR_BAD_PSM; 918 result = L2CAP_CR_BAD_PSM;
919 919
920 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 920 rsp.scid = cpu_to_le16(chan->dcid);
921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 921 rsp.dcid = cpu_to_le16(chan->scid);
922 rsp.result = cpu_to_le16(result); 922 rsp.result = cpu_to_le16(result);
923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
924 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 924 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
925 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 925 sizeof(rsp), &rsp);
926 } 926 }
927 927
928 l2cap_chan_del(sk, reason); 928 l2cap_chan_del(chan, reason);
929 break; 929 break;
930 930
931 case BT_CONNECT: 931 case BT_CONNECT:
932 case BT_DISCONN: 932 case BT_DISCONN:
933 l2cap_chan_del(sk, reason); 933 l2cap_chan_del(chan, reason);
934 break; 934 break;
935 935
936 default: 936 default:
@@ -942,6 +942,7 @@ void __l2cap_sock_close(struct sock *sk, int reason)
942static int l2cap_sock_shutdown(struct socket *sock, int how) 942static int l2cap_sock_shutdown(struct socket *sock, int how)
943{ 943{
944 struct sock *sk = sock->sk; 944 struct sock *sk = sock->sk;
945 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
945 int err = 0; 946 int err = 0;
946 947
947 BT_DBG("sock %p, sk %p", sock, sk); 948 BT_DBG("sock %p, sk %p", sock, sk);
@@ -951,7 +952,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
951 952
952 lock_sock(sk); 953 lock_sock(sk);
953 if (!sk->sk_shutdown) { 954 if (!sk->sk_shutdown) {
954 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) 955 if (chan->mode == L2CAP_MODE_ERTM)
955 err = __l2cap_wait_ack(sk); 956 err = __l2cap_wait_ack(sk);
956 957
957 sk->sk_shutdown = SHUTDOWN_MASK; 958 sk->sk_shutdown = SHUTDOWN_MASK;
@@ -998,49 +999,47 @@ static void l2cap_sock_destruct(struct sock *sk)
998void l2cap_sock_init(struct sock *sk, struct sock *parent) 999void l2cap_sock_init(struct sock *sk, struct sock *parent)
999{ 1000{
1000 struct l2cap_pinfo *pi = l2cap_pi(sk); 1001 struct l2cap_pinfo *pi = l2cap_pi(sk);
1002 struct l2cap_chan *chan = pi->chan;
1001 1003
1002 BT_DBG("sk %p", sk); 1004 BT_DBG("sk %p", sk);
1003 1005
1004 if (parent) { 1006 if (parent) {
1007 struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
1008
1005 sk->sk_type = parent->sk_type; 1009 sk->sk_type = parent->sk_type;
1006 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; 1010 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
1007 1011
1008 pi->imtu = l2cap_pi(parent)->imtu; 1012 chan->imtu = pchan->imtu;
1009 pi->omtu = l2cap_pi(parent)->omtu; 1013 chan->omtu = pchan->omtu;
1010 pi->conf_state = l2cap_pi(parent)->conf_state; 1014 chan->conf_state = pchan->conf_state;
1011 pi->mode = l2cap_pi(parent)->mode; 1015 chan->mode = pchan->mode;
1012 pi->fcs = l2cap_pi(parent)->fcs; 1016 chan->fcs = pchan->fcs;
1013 pi->max_tx = l2cap_pi(parent)->max_tx; 1017 chan->max_tx = pchan->max_tx;
1014 pi->tx_win = l2cap_pi(parent)->tx_win; 1018 chan->tx_win = pchan->tx_win;
1015 pi->sec_level = l2cap_pi(parent)->sec_level; 1019 chan->sec_level = pchan->sec_level;
1016 pi->role_switch = l2cap_pi(parent)->role_switch; 1020 chan->role_switch = pchan->role_switch;
1017 pi->force_reliable = l2cap_pi(parent)->force_reliable; 1021 chan->force_reliable = pchan->force_reliable;
1018 pi->flushable = l2cap_pi(parent)->flushable; 1022 chan->flushable = pchan->flushable;
1019 } else { 1023 } else {
1020 pi->imtu = L2CAP_DEFAULT_MTU; 1024 chan->imtu = L2CAP_DEFAULT_MTU;
1021 pi->omtu = 0; 1025 chan->omtu = 0;
1022 if (!disable_ertm && sk->sk_type == SOCK_STREAM) { 1026 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
1023 pi->mode = L2CAP_MODE_ERTM; 1027 chan->mode = L2CAP_MODE_ERTM;
1024 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 1028 chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
1025 } else { 1029 } else {
1026 pi->mode = L2CAP_MODE_BASIC; 1030 chan->mode = L2CAP_MODE_BASIC;
1027 } 1031 }
1028 pi->max_tx = L2CAP_DEFAULT_MAX_TX; 1032 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
1029 pi->fcs = L2CAP_FCS_CRC16; 1033 chan->fcs = L2CAP_FCS_CRC16;
1030 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW; 1034 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
1031 pi->sec_level = BT_SECURITY_LOW; 1035 chan->sec_level = BT_SECURITY_LOW;
1032 pi->role_switch = 0; 1036 chan->role_switch = 0;
1033 pi->force_reliable = 0; 1037 chan->force_reliable = 0;
1034 pi->flushable = BT_FLUSHABLE_OFF; 1038 chan->flushable = BT_FLUSHABLE_OFF;
1035 } 1039 }
1036 1040
1037 /* Default config options */ 1041 /* Default config options */
1038 pi->conf_len = 0; 1042 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
1039 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
1040 skb_queue_head_init(TX_QUEUE(sk));
1041 skb_queue_head_init(SREJ_QUEUE(sk));
1042 skb_queue_head_init(BUSY_QUEUE(sk));
1043 INIT_LIST_HEAD(SREJ_LIST(sk));
1044} 1043}
1045 1044
1046static struct proto l2cap_proto = { 1045static struct proto l2cap_proto = {
@@ -1078,6 +1077,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1078 int kern) 1077 int kern)
1079{ 1078{
1080 struct sock *sk; 1079 struct sock *sk;
1080 struct l2cap_chan *chan;
1081 1081
1082 BT_DBG("sock %p", sock); 1082 BT_DBG("sock %p", sock);
1083 1083
@@ -1096,11 +1096,19 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1096 if (!sk) 1096 if (!sk)
1097 return -ENOMEM; 1097 return -ENOMEM;
1098 1098
1099 chan = l2cap_chan_alloc(sk);
1100 if (!chan) {
1101 l2cap_sock_kill(sk);
1102 return -ENOMEM;
1103 }
1104
1105 l2cap_pi(sk)->chan = chan;
1106
1099 l2cap_sock_init(sk, NULL); 1107 l2cap_sock_init(sk, NULL);
1100 return 0; 1108 return 0;
1101} 1109}
1102 1110
1103const struct proto_ops l2cap_sock_ops = { 1111static const struct proto_ops l2cap_sock_ops = {
1104 .family = PF_BLUETOOTH, 1112 .family = PF_BLUETOOTH,
1105 .owner = THIS_MODULE, 1113 .owner = THIS_MODULE,
1106 .release = l2cap_sock_release, 1114 .release = l2cap_sock_release,
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4476d8e3c0f2..2481d257ed98 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -36,7 +36,7 @@ struct pending_cmd {
36 struct list_head list; 36 struct list_head list;
37 __u16 opcode; 37 __u16 opcode;
38 int index; 38 int index;
39 void *cmd; 39 void *param;
40 struct sock *sk; 40 struct sock *sk;
41 void *user_data; 41 void *user_data;
42}; 42};
@@ -179,10 +179,12 @@ static int read_controller_info(struct sock *sk, u16 index)
179 179
180 hci_del_off_timer(hdev); 180 hci_del_off_timer(hdev);
181 181
182 hci_dev_lock_bh(hdev); 182 hci_dev_lock(hdev);
183 183
184 set_bit(HCI_MGMT, &hdev->flags); 184 set_bit(HCI_MGMT, &hdev->flags);
185 185
186 memset(&rp, 0, sizeof(rp));
187
186 rp.type = hdev->dev_type; 188 rp.type = hdev->dev_type;
187 189
188 rp.powered = test_bit(HCI_UP, &hdev->flags); 190 rp.powered = test_bit(HCI_UP, &hdev->flags);
@@ -204,7 +206,9 @@ static int read_controller_info(struct sock *sk, u16 index)
204 rp.hci_ver = hdev->hci_ver; 206 rp.hci_ver = hdev->hci_ver;
205 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev); 207 put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
206 208
207 hci_dev_unlock_bh(hdev); 209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210
211 hci_dev_unlock(hdev);
208 hci_dev_put(hdev); 212 hci_dev_put(hdev);
209 213
210 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -213,7 +217,7 @@ static int read_controller_info(struct sock *sk, u16 index)
213static void mgmt_pending_free(struct pending_cmd *cmd) 217static void mgmt_pending_free(struct pending_cmd *cmd)
214{ 218{
215 sock_put(cmd->sk); 219 sock_put(cmd->sk);
216 kfree(cmd->cmd); 220 kfree(cmd->param);
217 kfree(cmd); 221 kfree(cmd);
218} 222}
219 223
@@ -229,13 +233,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
229 cmd->opcode = opcode; 233 cmd->opcode = opcode;
230 cmd->index = index; 234 cmd->index = index;
231 235
232 cmd->cmd = kmalloc(len, GFP_ATOMIC); 236 cmd->param = kmalloc(len, GFP_ATOMIC);
233 if (!cmd->cmd) { 237 if (!cmd->param) {
234 kfree(cmd); 238 kfree(cmd);
235 return NULL; 239 return NULL;
236 } 240 }
237 241
238 memcpy(cmd->cmd, data, len); 242 if (data)
243 memcpy(cmd->param, data, len);
239 244
240 cmd->sk = sk; 245 cmd->sk = sk;
241 sock_hold(sk); 246 sock_hold(sk);
@@ -311,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
311 if (!hdev) 316 if (!hdev)
312 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); 317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
313 318
314 hci_dev_lock_bh(hdev); 319 hci_dev_lock(hdev);
315 320
316 up = test_bit(HCI_UP, &hdev->flags); 321 up = test_bit(HCI_UP, &hdev->flags);
317 if ((cp->val && up) || (!cp->val && !up)) { 322 if ((cp->val && up) || (!cp->val && !up)) {
@@ -338,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
338 err = 0; 343 err = 0;
339 344
340failed: 345failed:
341 hci_dev_unlock_bh(hdev); 346 hci_dev_unlock(hdev);
342 hci_dev_put(hdev); 347 hci_dev_put(hdev);
343 return err; 348 return err;
344} 349}
@@ -363,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
363 if (!hdev) 368 if (!hdev)
364 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); 369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
365 370
366 hci_dev_lock_bh(hdev); 371 hci_dev_lock(hdev);
367 372
368 if (!test_bit(HCI_UP, &hdev->flags)) { 373 if (!test_bit(HCI_UP, &hdev->flags)) {
369 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -398,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
398 mgmt_pending_remove(cmd); 403 mgmt_pending_remove(cmd);
399 404
400failed: 405failed:
401 hci_dev_unlock_bh(hdev); 406 hci_dev_unlock(hdev);
402 hci_dev_put(hdev); 407 hci_dev_put(hdev);
403 408
404 return err; 409 return err;
@@ -424,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
424 if (!hdev) 429 if (!hdev)
425 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); 430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
426 431
427 hci_dev_lock_bh(hdev); 432 hci_dev_lock(hdev);
428 433
429 if (!test_bit(HCI_UP, &hdev->flags)) { 434 if (!test_bit(HCI_UP, &hdev->flags)) {
430 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -458,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
458 mgmt_pending_remove(cmd); 463 mgmt_pending_remove(cmd);
459 464
460failed: 465failed:
461 hci_dev_unlock_bh(hdev); 466 hci_dev_unlock(hdev);
462 hci_dev_put(hdev); 467 hci_dev_put(hdev);
463 468
464 return err; 469 return err;
@@ -517,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
517 if (!hdev) 522 if (!hdev)
518 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); 523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
519 524
520 hci_dev_lock_bh(hdev); 525 hci_dev_lock(hdev);
521 526
522 if (cp->val) 527 if (cp->val)
523 set_bit(HCI_PAIRABLE, &hdev->flags); 528 set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -533,12 +538,156 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
533 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
534 539
535failed: 540failed:
536 hci_dev_unlock_bh(hdev); 541 hci_dev_unlock(hdev);
537 hci_dev_put(hdev); 542 hci_dev_put(hdev);
538 543
539 return err; 544 return err;
540} 545}
541 546
547#define EIR_FLAGS 0x01 /* flags */
548#define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */
549#define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */
550#define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */
551#define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */
552#define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */
553#define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */
554#define EIR_NAME_SHORT 0x08 /* shortened local name */
555#define EIR_NAME_COMPLETE 0x09 /* complete local name */
556#define EIR_TX_POWER 0x0A /* transmit power level */
557#define EIR_DEVICE_ID 0x10 /* device ID */
558
559#define PNP_INFO_SVCLASS_ID 0x1200
560
561static u8 bluetooth_base_uuid[] = {
562 0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
563 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
564};
565
566static u16 get_uuid16(u8 *uuid128)
567{
568 u32 val;
569 int i;
570
571 for (i = 0; i < 12; i++) {
572 if (bluetooth_base_uuid[i] != uuid128[i])
573 return 0;
574 }
575
576 memcpy(&val, &uuid128[12], 4);
577
578 val = le32_to_cpu(val);
579 if (val > 0xffff)
580 return 0;
581
582 return (u16) val;
583}
584
585static void create_eir(struct hci_dev *hdev, u8 *data)
586{
587 u8 *ptr = data;
588 u16 eir_len = 0;
589 u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
590 int i, truncated = 0;
591 struct list_head *p;
592 size_t name_len;
593
594 name_len = strlen(hdev->dev_name);
595
596 if (name_len > 0) {
597 /* EIR Data type */
598 if (name_len > 48) {
599 name_len = 48;
600 ptr[1] = EIR_NAME_SHORT;
601 } else
602 ptr[1] = EIR_NAME_COMPLETE;
603
604 /* EIR Data length */
605 ptr[0] = name_len + 1;
606
607 memcpy(ptr + 2, hdev->dev_name, name_len);
608
609 eir_len += (name_len + 2);
610 ptr += (name_len + 2);
611 }
612
613 memset(uuid16_list, 0, sizeof(uuid16_list));
614
615 /* Group all UUID16 types */
616 list_for_each(p, &hdev->uuids) {
617 struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
618 u16 uuid16;
619
620 uuid16 = get_uuid16(uuid->uuid);
621 if (uuid16 == 0)
622 return;
623
624 if (uuid16 < 0x1100)
625 continue;
626
627 if (uuid16 == PNP_INFO_SVCLASS_ID)
628 continue;
629
630 /* Stop if not enough space to put next UUID */
631 if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
632 truncated = 1;
633 break;
634 }
635
636 /* Check for duplicates */
637 for (i = 0; uuid16_list[i] != 0; i++)
638 if (uuid16_list[i] == uuid16)
639 break;
640
641 if (uuid16_list[i] == 0) {
642 uuid16_list[i] = uuid16;
643 eir_len += sizeof(u16);
644 }
645 }
646
647 if (uuid16_list[0] != 0) {
648 u8 *length = ptr;
649
650 /* EIR Data type */
651 ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
652
653 ptr += 2;
654 eir_len += 2;
655
656 for (i = 0; uuid16_list[i] != 0; i++) {
657 *ptr++ = (uuid16_list[i] & 0x00ff);
658 *ptr++ = (uuid16_list[i] & 0xff00) >> 8;
659 }
660
661 /* EIR Data length */
662 *length = (i * sizeof(u16)) + 1;
663 }
664}
665
666static int update_eir(struct hci_dev *hdev)
667{
668 struct hci_cp_write_eir cp;
669
670 if (!(hdev->features[6] & LMP_EXT_INQ))
671 return 0;
672
673 if (hdev->ssp_mode == 0)
674 return 0;
675
676 if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
677 return 0;
678
679 memset(&cp, 0, sizeof(cp));
680
681 create_eir(hdev, cp.data);
682
683 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
684 return 0;
685
686 memcpy(hdev->eir, cp.data, sizeof(cp.data));
687
688 return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
689}
690
542static u8 get_service_classes(struct hci_dev *hdev) 691static u8 get_service_classes(struct hci_dev *hdev)
543{ 692{
544 struct list_head *p; 693 struct list_head *p;
@@ -590,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
590 if (!hdev) 739 if (!hdev)
591 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); 740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
592 741
593 hci_dev_lock_bh(hdev); 742 hci_dev_lock(hdev);
594 743
595 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
596 if (!uuid) { 745 if (!uuid) {
@@ -607,10 +756,14 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
607 if (err < 0) 756 if (err < 0)
608 goto failed; 757 goto failed;
609 758
759 err = update_eir(hdev);
760 if (err < 0)
761 goto failed;
762
610 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
611 764
612failed: 765failed:
613 hci_dev_unlock_bh(hdev); 766 hci_dev_unlock(hdev);
614 hci_dev_put(hdev); 767 hci_dev_put(hdev);
615 768
616 return err; 769 return err;
@@ -635,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
635 if (!hdev) 788 if (!hdev)
636 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); 789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
637 790
638 hci_dev_lock_bh(hdev); 791 hci_dev_lock(hdev);
639 792
640 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
641 err = hci_uuids_clear(hdev); 794 err = hci_uuids_clear(hdev);
@@ -663,10 +816,14 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
663 if (err < 0) 816 if (err < 0)
664 goto unlock; 817 goto unlock;
665 818
819 err = update_eir(hdev);
820 if (err < 0)
821 goto unlock;
822
666 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
667 824
668unlock: 825unlock:
669 hci_dev_unlock_bh(hdev); 826 hci_dev_unlock(hdev);
670 hci_dev_put(hdev); 827 hci_dev_put(hdev);
671 828
672 return err; 829 return err;
@@ -690,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
690 if (!hdev) 847 if (!hdev)
691 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); 848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
692 849
693 hci_dev_lock_bh(hdev); 850 hci_dev_lock(hdev);
694 851
695 hdev->major_class = cp->major; 852 hdev->major_class = cp->major;
696 hdev->minor_class = cp->minor; 853 hdev->minor_class = cp->minor;
@@ -700,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
700 if (err == 0) 857 if (err == 0)
701 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
702 859
703 hci_dev_unlock_bh(hdev); 860 hci_dev_unlock(hdev);
704 hci_dev_put(hdev); 861 hci_dev_put(hdev);
705 862
706 return err; 863 return err;
@@ -722,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
722 if (!hdev) 879 if (!hdev)
723 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
724 881
725 hci_dev_lock_bh(hdev); 882 hci_dev_lock(hdev);
726 883
727 BT_DBG("hci%u enable %d", index, cp->enable); 884 BT_DBG("hci%u enable %d", index, cp->enable);
728 885
@@ -732,13 +889,15 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
732 } else { 889 } else {
733 clear_bit(HCI_SERVICE_CACHE, &hdev->flags); 890 clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
734 err = update_class(hdev); 891 err = update_class(hdev);
892 if (err == 0)
893 err = update_eir(hdev);
735 } 894 }
736 895
737 if (err == 0) 896 if (err == 0)
738 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
739 0); 898 0);
740 899
741 hci_dev_unlock_bh(hdev); 900 hci_dev_unlock(hdev);
742 hci_dev_put(hdev); 901 hci_dev_put(hdev);
743 902
744 return err; 903 return err;
@@ -772,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
772 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
773 key_count); 932 key_count);
774 933
775 hci_dev_lock_bh(hdev); 934 hci_dev_lock(hdev);
776 935
777 hci_link_keys_clear(hdev); 936 hci_link_keys_clear(hdev);
778 937
@@ -786,11 +945,11 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
786 for (i = 0; i < key_count; i++) { 945 for (i = 0; i < key_count; i++) {
787 struct mgmt_key_info *key = &cp->keys[i]; 946 struct mgmt_key_info *key = &cp->keys[i];
788 947
789 hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type, 948 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
790 key->pin_len); 949 key->pin_len);
791 } 950 }
792 951
793 hci_dev_unlock_bh(hdev); 952 hci_dev_unlock(hdev);
794 hci_dev_put(hdev); 953 hci_dev_put(hdev);
795 954
796 return 0; 955 return 0;
@@ -812,7 +971,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
812 if (!hdev) 971 if (!hdev)
813 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); 972 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
814 973
815 hci_dev_lock_bh(hdev); 974 hci_dev_lock(hdev);
816 975
817 err = hci_remove_link_key(hdev, &cp->bdaddr); 976 err = hci_remove_link_key(hdev, &cp->bdaddr);
818 if (err < 0) { 977 if (err < 0) {
@@ -835,7 +994,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
835 } 994 }
836 995
837unlock: 996unlock:
838 hci_dev_unlock_bh(hdev); 997 hci_dev_unlock(hdev);
839 hci_dev_put(hdev); 998 hci_dev_put(hdev);
840 999
841 return err; 1000 return err;
@@ -861,7 +1020,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
861 if (!hdev) 1020 if (!hdev)
862 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); 1021 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
863 1022
864 hci_dev_lock_bh(hdev); 1023 hci_dev_lock(hdev);
865 1024
866 if (!test_bit(HCI_UP, &hdev->flags)) { 1025 if (!test_bit(HCI_UP, &hdev->flags)) {
867 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); 1026 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -893,7 +1052,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
893 mgmt_pending_remove(cmd); 1052 mgmt_pending_remove(cmd);
894 1053
895failed: 1054failed:
896 hci_dev_unlock_bh(hdev); 1055 hci_dev_unlock(hdev);
897 hci_dev_put(hdev); 1056 hci_dev_put(hdev);
898 1057
899 return err; 1058 return err;
@@ -914,7 +1073,7 @@ static int get_connections(struct sock *sk, u16 index)
914 if (!hdev) 1073 if (!hdev)
915 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); 1074 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
916 1075
917 hci_dev_lock_bh(hdev); 1076 hci_dev_lock(hdev);
918 1077
919 count = 0; 1078 count = 0;
920 list_for_each(p, &hdev->conn_hash.list) { 1079 list_for_each(p, &hdev->conn_hash.list) {
@@ -945,7 +1104,7 @@ static int get_connections(struct sock *sk, u16 index)
945 1104
946unlock: 1105unlock:
947 kfree(rp); 1106 kfree(rp);
948 hci_dev_unlock_bh(hdev); 1107 hci_dev_unlock(hdev);
949 hci_dev_put(hdev); 1108 hci_dev_put(hdev);
950 return err; 1109 return err;
951} 1110}
@@ -970,7 +1129,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
970 if (!hdev) 1129 if (!hdev)
971 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); 1130 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
972 1131
973 hci_dev_lock_bh(hdev); 1132 hci_dev_lock(hdev);
974 1133
975 if (!test_bit(HCI_UP, &hdev->flags)) { 1134 if (!test_bit(HCI_UP, &hdev->flags)) {
976 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); 1135 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
@@ -992,7 +1151,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
992 mgmt_pending_remove(cmd); 1151 mgmt_pending_remove(cmd);
993 1152
994failed: 1153failed:
995 hci_dev_unlock_bh(hdev); 1154 hci_dev_unlock(hdev);
996 hci_dev_put(hdev); 1155 hci_dev_put(hdev);
997 1156
998 return err; 1157 return err;
@@ -1019,7 +1178,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1019 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1178 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1020 ENODEV); 1179 ENODEV);
1021 1180
1022 hci_dev_lock_bh(hdev); 1181 hci_dev_lock(hdev);
1023 1182
1024 if (!test_bit(HCI_UP, &hdev->flags)) { 1183 if (!test_bit(HCI_UP, &hdev->flags)) {
1025 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1184 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1040,7 +1199,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1040 mgmt_pending_remove(cmd); 1199 mgmt_pending_remove(cmd);
1041 1200
1042failed: 1201failed:
1043 hci_dev_unlock_bh(hdev); 1202 hci_dev_unlock(hdev);
1044 hci_dev_put(hdev); 1203 hci_dev_put(hdev);
1045 1204
1046 return err; 1205 return err;
@@ -1063,14 +1222,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1063 if (!hdev) 1222 if (!hdev)
1064 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1223 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1065 1224
1066 hci_dev_lock_bh(hdev); 1225 hci_dev_lock(hdev);
1067 1226
1068 hdev->io_capability = cp->io_capability; 1227 hdev->io_capability = cp->io_capability;
1069 1228
1070 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1229 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1071 hdev->io_capability); 1230 hdev->io_capability);
1072 1231
1073 hci_dev_unlock_bh(hdev); 1232 hci_dev_unlock(hdev);
1074 hci_dev_put(hdev); 1233 hci_dev_put(hdev);
1075 1234
1076 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1235 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1156,7 +1315,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1156 if (!hdev) 1315 if (!hdev)
1157 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); 1316 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
1158 1317
1159 hci_dev_lock_bh(hdev); 1318 hci_dev_lock(hdev);
1160 1319
1161 if (cp->io_cap == 0x03) { 1320 if (cp->io_cap == 0x03) {
1162 sec_level = BT_SECURITY_MEDIUM; 1321 sec_level = BT_SECURITY_MEDIUM;
@@ -1198,7 +1357,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1198 err = 0; 1357 err = 0;
1199 1358
1200unlock: 1359unlock:
1201 hci_dev_unlock_bh(hdev); 1360 hci_dev_unlock(hdev);
1202 hci_dev_put(hdev); 1361 hci_dev_put(hdev);
1203 1362
1204 return err; 1363 return err;
@@ -1230,7 +1389,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1230 if (!hdev) 1389 if (!hdev)
1231 return cmd_status(sk, index, mgmt_op, ENODEV); 1390 return cmd_status(sk, index, mgmt_op, ENODEV);
1232 1391
1233 hci_dev_lock_bh(hdev); 1392 hci_dev_lock(hdev);
1234 1393
1235 if (!test_bit(HCI_UP, &hdev->flags)) { 1394 if (!test_bit(HCI_UP, &hdev->flags)) {
1236 err = cmd_status(sk, index, mgmt_op, ENETDOWN); 1395 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1248,6 +1407,231 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1248 mgmt_pending_remove(cmd); 1407 mgmt_pending_remove(cmd);
1249 1408
1250failed: 1409failed:
1410 hci_dev_unlock(hdev);
1411 hci_dev_put(hdev);
1412
1413 return err;
1414}
1415
1416static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1417 u16 len)
1418{
1419 struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
1420 struct hci_cp_write_local_name hci_cp;
1421 struct hci_dev *hdev;
1422 struct pending_cmd *cmd;
1423 int err;
1424
1425 BT_DBG("");
1426
1427 if (len != sizeof(*mgmt_cp))
1428 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
1429
1430 hdev = hci_dev_get(index);
1431 if (!hdev)
1432 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
1433
1434 hci_dev_lock(hdev);
1435
1436 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
1437 if (!cmd) {
1438 err = -ENOMEM;
1439 goto failed;
1440 }
1441
1442 memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
1443 err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
1444 &hci_cp);
1445 if (err < 0)
1446 mgmt_pending_remove(cmd);
1447
1448failed:
1449 hci_dev_unlock(hdev);
1450 hci_dev_put(hdev);
1451
1452 return err;
1453}
1454
1455static int read_local_oob_data(struct sock *sk, u16 index)
1456{
1457 struct hci_dev *hdev;
1458 struct pending_cmd *cmd;
1459 int err;
1460
1461 BT_DBG("hci%u", index);
1462
1463 hdev = hci_dev_get(index);
1464 if (!hdev)
1465 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1466 ENODEV);
1467
1468 hci_dev_lock(hdev);
1469
1470 if (!test_bit(HCI_UP, &hdev->flags)) {
1471 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1472 ENETDOWN);
1473 goto unlock;
1474 }
1475
1476 if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
1477 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1478 EOPNOTSUPP);
1479 goto unlock;
1480 }
1481
1482 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
1483 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
1484 goto unlock;
1485 }
1486
1487 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
1488 if (!cmd) {
1489 err = -ENOMEM;
1490 goto unlock;
1491 }
1492
1493 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
1494 if (err < 0)
1495 mgmt_pending_remove(cmd);
1496
1497unlock:
1498 hci_dev_unlock(hdev);
1499 hci_dev_put(hdev);
1500
1501 return err;
1502}
1503
1504static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1505 u16 len)
1506{
1507 struct hci_dev *hdev;
1508 struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
1509 int err;
1510
1511 BT_DBG("hci%u ", index);
1512
1513 if (len != sizeof(*cp))
1514 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1515 EINVAL);
1516
1517 hdev = hci_dev_get(index);
1518 if (!hdev)
1519 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1520 ENODEV);
1521
1522 hci_dev_lock(hdev);
1523
1524 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1525 cp->randomizer);
1526 if (err < 0)
1527 err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
1528 else
1529 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1530 0);
1531
1532 hci_dev_unlock(hdev);
1533 hci_dev_put(hdev);
1534
1535 return err;
1536}
1537
1538static int remove_remote_oob_data(struct sock *sk, u16 index,
1539 unsigned char *data, u16 len)
1540{
1541 struct hci_dev *hdev;
1542 struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
1543 int err;
1544
1545 BT_DBG("hci%u ", index);
1546
1547 if (len != sizeof(*cp))
1548 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1549 EINVAL);
1550
1551 hdev = hci_dev_get(index);
1552 if (!hdev)
1553 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1554 ENODEV);
1555
1556 hci_dev_lock(hdev);
1557
1558 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1559 if (err < 0)
1560 err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1561 -err);
1562 else
1563 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1564 NULL, 0);
1565
1566 hci_dev_unlock(hdev);
1567 hci_dev_put(hdev);
1568
1569 return err;
1570}
1571
1572static int start_discovery(struct sock *sk, u16 index)
1573{
1574 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1575 struct hci_cp_inquiry cp;
1576 struct pending_cmd *cmd;
1577 struct hci_dev *hdev;
1578 int err;
1579
1580 BT_DBG("hci%u", index);
1581
1582 hdev = hci_dev_get(index);
1583 if (!hdev)
1584 return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
1585
1586 hci_dev_lock_bh(hdev);
1587
1588 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
1589 if (!cmd) {
1590 err = -ENOMEM;
1591 goto failed;
1592 }
1593
1594 memset(&cp, 0, sizeof(cp));
1595 memcpy(&cp.lap, lap, 3);
1596 cp.length = 0x08;
1597 cp.num_rsp = 0x00;
1598
1599 err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1600 if (err < 0)
1601 mgmt_pending_remove(cmd);
1602
1603failed:
1604 hci_dev_unlock_bh(hdev);
1605 hci_dev_put(hdev);
1606
1607 return err;
1608}
1609
1610static int stop_discovery(struct sock *sk, u16 index)
1611{
1612 struct hci_dev *hdev;
1613 struct pending_cmd *cmd;
1614 int err;
1615
1616 BT_DBG("hci%u", index);
1617
1618 hdev = hci_dev_get(index);
1619 if (!hdev)
1620 return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
1621
1622 hci_dev_lock_bh(hdev);
1623
1624 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
1625 if (!cmd) {
1626 err = -ENOMEM;
1627 goto failed;
1628 }
1629
1630 err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1631 if (err < 0)
1632 mgmt_pending_remove(cmd);
1633
1634failed:
1251 hci_dev_unlock_bh(hdev); 1635 hci_dev_unlock_bh(hdev);
1252 hci_dev_put(hdev); 1636 hci_dev_put(hdev);
1253 1637
@@ -1266,7 +1650,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1266 if (msglen < sizeof(*hdr)) 1650 if (msglen < sizeof(*hdr))
1267 return -EINVAL; 1651 return -EINVAL;
1268 1652
1269 buf = kmalloc(msglen, GFP_ATOMIC); 1653 buf = kmalloc(msglen, GFP_KERNEL);
1270 if (!buf) 1654 if (!buf)
1271 return -ENOMEM; 1655 return -ENOMEM;
1272 1656
@@ -1349,6 +1733,25 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1349 case MGMT_OP_USER_CONFIRM_NEG_REPLY: 1733 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
1350 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0); 1734 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
1351 break; 1735 break;
1736 case MGMT_OP_SET_LOCAL_NAME:
1737 err = set_local_name(sk, index, buf + sizeof(*hdr), len);
1738 break;
1739 case MGMT_OP_READ_LOCAL_OOB_DATA:
1740 err = read_local_oob_data(sk, index);
1741 break;
1742 case MGMT_OP_ADD_REMOTE_OOB_DATA:
1743 err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
1744 break;
1745 case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
1746 err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
1747 len);
1748 break;
1749 case MGMT_OP_START_DISCOVERY:
1750 err = start_discovery(sk, index);
1751 break;
1752 case MGMT_OP_STOP_DISCOVERY:
1753 err = stop_discovery(sk, index);
1754 break;
1352 default: 1755 default:
1353 BT_DBG("Unknown op %u", opcode); 1756 BT_DBG("Unknown op %u", opcode);
1354 err = cmd_status(sk, index, opcode, 0x01); 1757 err = cmd_status(sk, index, opcode, 0x01);
@@ -1382,7 +1785,7 @@ struct cmd_lookup {
1382 1785
1383static void mode_rsp(struct pending_cmd *cmd, void *data) 1786static void mode_rsp(struct pending_cmd *cmd, void *data)
1384{ 1787{
1385 struct mgmt_mode *cp = cmd->cmd; 1788 struct mgmt_mode *cp = cmd->param;
1386 struct cmd_lookup *match = data; 1789 struct cmd_lookup *match = data;
1387 1790
1388 if (cp->val != match->val) 1791 if (cp->val != match->val)
@@ -1455,17 +1858,17 @@ int mgmt_connectable(u16 index, u8 connectable)
1455 return ret; 1858 return ret;
1456} 1859}
1457 1860
1458int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type) 1861int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1459{ 1862{
1460 struct mgmt_ev_new_key ev; 1863 struct mgmt_ev_new_key ev;
1461 1864
1462 memset(&ev, 0, sizeof(ev)); 1865 memset(&ev, 0, sizeof(ev));
1463 1866
1867 ev.store_hint = persistent;
1464 bacpy(&ev.key.bdaddr, &key->bdaddr); 1868 bacpy(&ev.key.bdaddr, &key->bdaddr);
1465 ev.key.type = key->type; 1869 ev.key.type = key->type;
1466 memcpy(ev.key.val, key->val, 16); 1870 memcpy(ev.key.val, key->val, 16);
1467 ev.key.pin_len = key->pin_len; 1871 ev.key.pin_len = key->pin_len;
1468 ev.old_key_type = old_key_type;
1469 1872
1470 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1873 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
1471} 1874}
@@ -1481,7 +1884,7 @@ int mgmt_connected(u16 index, bdaddr_t *bdaddr)
1481 1884
1482static void disconnect_rsp(struct pending_cmd *cmd, void *data) 1885static void disconnect_rsp(struct pending_cmd *cmd, void *data)
1483{ 1886{
1484 struct mgmt_cp_disconnect *cp = cmd->cmd; 1887 struct mgmt_cp_disconnect *cp = cmd->param;
1485 struct sock **sk = data; 1888 struct sock **sk = data;
1486 struct mgmt_rp_disconnect rp; 1889 struct mgmt_rp_disconnect rp;
1487 1890
@@ -1539,11 +1942,12 @@ int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1539 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); 1942 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
1540} 1943}
1541 1944
1542int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr) 1945int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
1543{ 1946{
1544 struct mgmt_ev_pin_code_request ev; 1947 struct mgmt_ev_pin_code_request ev;
1545 1948
1546 bacpy(&ev.bdaddr, bdaddr); 1949 bacpy(&ev.bdaddr, bdaddr);
1950 ev.secure = secure;
1547 1951
1548 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), 1952 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
1549 NULL); 1953 NULL);
@@ -1591,13 +1995,15 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1591 return err; 1995 return err;
1592} 1996}
1593 1997
1594int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value) 1998int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
1999 u8 confirm_hint)
1595{ 2000{
1596 struct mgmt_ev_user_confirm_request ev; 2001 struct mgmt_ev_user_confirm_request ev;
1597 2002
1598 BT_DBG("hci%u", index); 2003 BT_DBG("hci%u", index);
1599 2004
1600 bacpy(&ev.bdaddr, bdaddr); 2005 bacpy(&ev.bdaddr, bdaddr);
2006 ev.confirm_hint = confirm_hint;
1601 put_unaligned_le32(value, &ev.value); 2007 put_unaligned_le32(value, &ev.value);
1602 2008
1603 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), 2009 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
@@ -1645,3 +2051,110 @@ int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1645 2051
1646 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); 2052 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
1647} 2053}
2054
2055int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
2056{
2057 struct pending_cmd *cmd;
2058 struct hci_dev *hdev;
2059 struct mgmt_cp_set_local_name ev;
2060 int err;
2061
2062 memset(&ev, 0, sizeof(ev));
2063 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2064
2065 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
2066 if (!cmd)
2067 goto send_event;
2068
2069 if (status) {
2070 err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
2071 goto failed;
2072 }
2073
2074 hdev = hci_dev_get(index);
2075 if (hdev) {
2076 hci_dev_lock_bh(hdev);
2077 update_eir(hdev);
2078 hci_dev_unlock_bh(hdev);
2079 hci_dev_put(hdev);
2080 }
2081
2082 err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
2083 sizeof(ev));
2084 if (err < 0)
2085 goto failed;
2086
2087send_event:
2088 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
2089 cmd ? cmd->sk : NULL);
2090
2091failed:
2092 if (cmd)
2093 mgmt_pending_remove(cmd);
2094 return err;
2095}
2096
2097int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
2098 u8 status)
2099{
2100 struct pending_cmd *cmd;
2101 int err;
2102
2103 BT_DBG("hci%u status %u", index, status);
2104
2105 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
2106 if (!cmd)
2107 return -ENOENT;
2108
2109 if (status) {
2110 err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
2111 EIO);
2112 } else {
2113 struct mgmt_rp_read_local_oob_data rp;
2114
2115 memcpy(rp.hash, hash, sizeof(rp.hash));
2116 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
2117
2118 err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
2119 &rp, sizeof(rp));
2120 }
2121
2122 mgmt_pending_remove(cmd);
2123
2124 return err;
2125}
2126
2127int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2128 u8 *eir)
2129{
2130 struct mgmt_ev_device_found ev;
2131
2132 memset(&ev, 0, sizeof(ev));
2133
2134 bacpy(&ev.bdaddr, bdaddr);
2135 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2136 ev.rssi = rssi;
2137
2138 if (eir)
2139 memcpy(ev.eir, eir, sizeof(ev.eir));
2140
2141 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
2142}
2143
2144int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
2145{
2146 struct mgmt_ev_remote_name ev;
2147
2148 memset(&ev, 0, sizeof(ev));
2149
2150 bacpy(&ev.bdaddr, bdaddr);
2151 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2152
2153 return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
2154}
2155
2156int mgmt_discovering(u16 index, u8 discovering)
2157{
2158 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
2159 sizeof(discovering), NULL);
2160}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c9973932456f..121a5c13b989 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -232,6 +232,8 @@ static int rfcomm_l2sock_create(struct socket **sock)
232static inline int rfcomm_check_security(struct rfcomm_dlc *d) 232static inline int rfcomm_check_security(struct rfcomm_dlc *d)
233{ 233{
234 struct sock *sk = d->session->sock->sk; 234 struct sock *sk = d->session->sock->sk;
235 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
236
235 __u8 auth_type; 237 __u8 auth_type;
236 238
237 switch (d->sec_level) { 239 switch (d->sec_level) {
@@ -246,8 +248,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
246 break; 248 break;
247 } 249 }
248 250
249 return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level, 251 return hci_conn_security(conn->hcon, d->sec_level, auth_type);
250 auth_type);
251} 252}
252 253
253static void rfcomm_session_timeout(unsigned long arg) 254static void rfcomm_session_timeout(unsigned long arg)
@@ -710,10 +711,10 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
710 /* Set L2CAP options */ 711 /* Set L2CAP options */
711 sk = sock->sk; 712 sk = sock->sk;
712 lock_sock(sk); 713 lock_sock(sk);
713 l2cap_pi(sk)->imtu = l2cap_mtu; 714 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
714 l2cap_pi(sk)->sec_level = sec_level; 715 l2cap_pi(sk)->chan->sec_level = sec_level;
715 if (l2cap_ertm) 716 if (l2cap_ertm)
716 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 717 l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
717 release_sock(sk); 718 release_sock(sk);
718 719
719 s = rfcomm_session_add(sock, BT_BOUND); 720 s = rfcomm_session_add(sock, BT_BOUND);
@@ -1241,6 +1242,7 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
1241void rfcomm_dlc_accept(struct rfcomm_dlc *d) 1242void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1242{ 1243{
1243 struct sock *sk = d->session->sock->sk; 1244 struct sock *sk = d->session->sock->sk;
1245 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1244 1246
1245 BT_DBG("dlc %p", d); 1247 BT_DBG("dlc %p", d);
1246 1248
@@ -1254,7 +1256,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1254 rfcomm_dlc_unlock(d); 1256 rfcomm_dlc_unlock(d);
1255 1257
1256 if (d->role_switch) 1258 if (d->role_switch)
1257 hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); 1259 hci_conn_switch_role(conn->hcon, 0x00);
1258 1260
1259 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); 1261 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
1260} 1262}
@@ -1890,7 +1892,8 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1890 1892
1891 /* We should adjust MTU on incoming sessions. 1893 /* We should adjust MTU on incoming sessions.
1892 * L2CAP MTU minus UIH header and FCS. */ 1894 * L2CAP MTU minus UIH header and FCS. */
1893 s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; 1895 s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
1896 l2cap_pi(nsock->sk)->chan->imtu) - 5;
1894 1897
1895 rfcomm_schedule(); 1898 rfcomm_schedule();
1896 } else 1899 } else
@@ -1909,7 +1912,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1909 1912
1910 /* We can adjust MTU on outgoing sessions. 1913 /* We can adjust MTU on outgoing sessions.
1911 * L2CAP MTU minus UIH header and FCS. */ 1914 * L2CAP MTU minus UIH header and FCS. */
1912 s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5; 1915 s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
1913 1916
1914 rfcomm_send_sabm(s, 0); 1917 rfcomm_send_sabm(s, 0);
1915 break; 1918 break;
@@ -1992,7 +1995,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
1992 /* Set L2CAP options */ 1995 /* Set L2CAP options */
1993 sk = sock->sk; 1996 sk = sock->sk;
1994 lock_sock(sk); 1997 lock_sock(sk);
1995 l2cap_pi(sk)->imtu = l2cap_mtu; 1998 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
1996 release_sock(sk); 1999 release_sock(sk);
1997 2000
1998 /* Start listening on the socket */ 2001 /* Start listening on the socket */
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 66cc1f0c3df8..386cfaffd4b7 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -743,6 +743,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
743 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
744 struct sock *l2cap_sk; 744 struct sock *l2cap_sk;
745 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
746 int len, err = 0; 747 int len, err = 0;
747 u32 opt; 748 u32 opt;
748 749
@@ -787,8 +788,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
787 788
788 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
789 790
790 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; 791 cinfo.hci_handle = conn->hcon->handle;
791 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); 792 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
792 793
793 len = min_t(unsigned int, len, sizeof(cinfo)); 794 len = min_t(unsigned int, len, sizeof(cinfo));
794 if (copy_to_user(optval, (char *) &cinfo, len)) 795 if (copy_to_user(optval, (char *) &cinfo, len))
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 84bbb82599b2..f20c4fd915a8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -104,3 +104,4 @@ module_init(br_init)
104module_exit(br_deinit) 104module_exit(br_deinit)
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
106MODULE_VERSION(BR_VERSION); 106MODULE_VERSION(BR_VERSION);
107MODULE_ALIAS_RTNL_LINK("bridge");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 21e5901186ea..a6b2f86378c7 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -74,13 +74,23 @@ out:
74 return NETDEV_TX_OK; 74 return NETDEV_TX_OK;
75} 75}
76 76
77static int br_dev_init(struct net_device *dev)
78{
79 struct net_bridge *br = netdev_priv(dev);
80
81 br->stats = alloc_percpu(struct br_cpu_netstats);
82 if (!br->stats)
83 return -ENOMEM;
84
85 return 0;
86}
87
77static int br_dev_open(struct net_device *dev) 88static int br_dev_open(struct net_device *dev)
78{ 89{
79 struct net_bridge *br = netdev_priv(dev); 90 struct net_bridge *br = netdev_priv(dev);
80 91
81 netif_carrier_off(dev); 92 netif_carrier_off(dev);
82 93 netdev_update_features(dev);
83 br_features_recompute(br);
84 netif_start_queue(dev); 94 netif_start_queue(dev);
85 br_stp_enable_bridge(br); 95 br_stp_enable_bridge(br);
86 br_multicast_open(br); 96 br_multicast_open(br);
@@ -177,48 +187,11 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
177 strcpy(info->bus_info, "N/A"); 187 strcpy(info->bus_info, "N/A");
178} 188}
179 189
180static int br_set_sg(struct net_device *dev, u32 data) 190static u32 br_fix_features(struct net_device *dev, u32 features)
181{
182 struct net_bridge *br = netdev_priv(dev);
183
184 if (data)
185 br->feature_mask |= NETIF_F_SG;
186 else
187 br->feature_mask &= ~NETIF_F_SG;
188
189 br_features_recompute(br);
190 return 0;
191}
192
193static int br_set_tso(struct net_device *dev, u32 data)
194{
195 struct net_bridge *br = netdev_priv(dev);
196
197 if (data)
198 br->feature_mask |= NETIF_F_TSO;
199 else
200 br->feature_mask &= ~NETIF_F_TSO;
201
202 br_features_recompute(br);
203 return 0;
204}
205
206static int br_set_tx_csum(struct net_device *dev, u32 data)
207{ 191{
208 struct net_bridge *br = netdev_priv(dev); 192 struct net_bridge *br = netdev_priv(dev);
209 193
210 if (data) 194 return br_features_recompute(br, features);
211 br->feature_mask |= NETIF_F_NO_CSUM;
212 else
213 br->feature_mask &= ~NETIF_F_ALL_CSUM;
214
215 br_features_recompute(br);
216 return 0;
217}
218
219static int br_set_flags(struct net_device *netdev, u32 data)
220{
221 return ethtool_op_set_flags(netdev, data, ETH_FLAG_TXVLAN);
222} 195}
223 196
224#ifdef CONFIG_NET_POLL_CONTROLLER 197#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -319,21 +292,12 @@ static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
319static const struct ethtool_ops br_ethtool_ops = { 292static const struct ethtool_ops br_ethtool_ops = {
320 .get_drvinfo = br_getinfo, 293 .get_drvinfo = br_getinfo,
321 .get_link = ethtool_op_get_link, 294 .get_link = ethtool_op_get_link,
322 .get_tx_csum = ethtool_op_get_tx_csum,
323 .set_tx_csum = br_set_tx_csum,
324 .get_sg = ethtool_op_get_sg,
325 .set_sg = br_set_sg,
326 .get_tso = ethtool_op_get_tso,
327 .set_tso = br_set_tso,
328 .get_ufo = ethtool_op_get_ufo,
329 .set_ufo = ethtool_op_set_ufo,
330 .get_flags = ethtool_op_get_flags,
331 .set_flags = br_set_flags,
332}; 295};
333 296
334static const struct net_device_ops br_netdev_ops = { 297static const struct net_device_ops br_netdev_ops = {
335 .ndo_open = br_dev_open, 298 .ndo_open = br_dev_open,
336 .ndo_stop = br_dev_stop, 299 .ndo_stop = br_dev_stop,
300 .ndo_init = br_dev_init,
337 .ndo_start_xmit = br_dev_xmit, 301 .ndo_start_xmit = br_dev_xmit,
338 .ndo_get_stats64 = br_get_stats64, 302 .ndo_get_stats64 = br_get_stats64,
339 .ndo_set_mac_address = br_set_mac_address, 303 .ndo_set_mac_address = br_set_mac_address,
@@ -347,6 +311,7 @@ static const struct net_device_ops br_netdev_ops = {
347#endif 311#endif
348 .ndo_add_slave = br_add_slave, 312 .ndo_add_slave = br_add_slave,
349 .ndo_del_slave = br_del_slave, 313 .ndo_del_slave = br_del_slave,
314 .ndo_fix_features = br_fix_features,
350}; 315};
351 316
352static void br_dev_free(struct net_device *dev) 317static void br_dev_free(struct net_device *dev)
@@ -357,18 +322,49 @@ static void br_dev_free(struct net_device *dev)
357 free_netdev(dev); 322 free_netdev(dev);
358} 323}
359 324
325static struct device_type br_type = {
326 .name = "bridge",
327};
328
360void br_dev_setup(struct net_device *dev) 329void br_dev_setup(struct net_device *dev)
361{ 330{
331 struct net_bridge *br = netdev_priv(dev);
332
362 random_ether_addr(dev->dev_addr); 333 random_ether_addr(dev->dev_addr);
363 ether_setup(dev); 334 ether_setup(dev);
364 335
365 dev->netdev_ops = &br_netdev_ops; 336 dev->netdev_ops = &br_netdev_ops;
366 dev->destructor = br_dev_free; 337 dev->destructor = br_dev_free;
367 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 338 SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
339 SET_NETDEV_DEVTYPE(dev, &br_type);
368 dev->tx_queue_len = 0; 340 dev->tx_queue_len = 0;
369 dev->priv_flags = IFF_EBRIDGE; 341 dev->priv_flags = IFF_EBRIDGE;
370 342
371 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 343 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
372 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | 344 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
373 NETIF_F_NETNS_LOCAL | NETIF_F_GSO | NETIF_F_HW_VLAN_TX; 345 NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
346 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
347 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
348 NETIF_F_HW_VLAN_TX;
349
350 br->dev = dev;
351 spin_lock_init(&br->lock);
352 INIT_LIST_HEAD(&br->port_list);
353 spin_lock_init(&br->hash_lock);
354
355 br->bridge_id.prio[0] = 0x80;
356 br->bridge_id.prio[1] = 0x00;
357
358 memcpy(br->group_addr, br_group_address, ETH_ALEN);
359
360 br->stp_enabled = BR_NO_STP;
361 br->designated_root = br->bridge_id;
362 br->bridge_max_age = br->max_age = 20 * HZ;
363 br->bridge_hello_time = br->hello_time = 2 * HZ;
364 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
365 br->ageing_time = 300 * HZ;
366
367 br_netfilter_rtable_init(br);
368 br_stp_timer_init(br);
369 br_multicast_init(br);
374} 370}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cc4d3c5ab1c6..e0dfbc151dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,6 +28,7 @@
28static struct kmem_cache *br_fdb_cache __read_mostly; 28static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 30 const unsigned char *addr);
31static void fdb_notify(const struct net_bridge_fdb_entry *, int);
31 32
32static u32 fdb_salt __read_mostly; 33static u32 fdb_salt __read_mostly;
33 34
@@ -62,7 +63,7 @@ static inline int has_expired(const struct net_bridge *br,
62 const struct net_bridge_fdb_entry *fdb) 63 const struct net_bridge_fdb_entry *fdb)
63{ 64{
64 return !fdb->is_static && 65 return !fdb->is_static &&
65 time_before_eq(fdb->ageing_timer + hold_time(br), jiffies); 66 time_before_eq(fdb->updated + hold_time(br), jiffies);
66} 67}
67 68
68static inline int br_mac_hash(const unsigned char *mac) 69static inline int br_mac_hash(const unsigned char *mac)
@@ -81,6 +82,7 @@ static void fdb_rcu_free(struct rcu_head *head)
81 82
82static inline void fdb_delete(struct net_bridge_fdb_entry *f) 83static inline void fdb_delete(struct net_bridge_fdb_entry *f)
83{ 84{
85 fdb_notify(f, RTM_DELNEIGH);
84 hlist_del_rcu(&f->hlist); 86 hlist_del_rcu(&f->hlist);
85 call_rcu(&f->rcu, fdb_rcu_free); 87 call_rcu(&f->rcu, fdb_rcu_free);
86} 88}
@@ -140,7 +142,7 @@ void br_fdb_cleanup(unsigned long _data)
140 unsigned long this_timer; 142 unsigned long this_timer;
141 if (f->is_static) 143 if (f->is_static)
142 continue; 144 continue;
143 this_timer = f->ageing_timer + delay; 145 this_timer = f->updated + delay;
144 if (time_before_eq(this_timer, jiffies)) 146 if (time_before_eq(this_timer, jiffies))
145 fdb_delete(f); 147 fdb_delete(f);
146 else if (time_before(this_timer, next_timer)) 148 else if (time_before(this_timer, next_timer))
@@ -293,7 +295,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
293 295
294 fe->is_local = f->is_local; 296 fe->is_local = f->is_local;
295 if (!f->is_static) 297 if (!f->is_static)
296 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer); 298 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
297 ++fe; 299 ++fe;
298 ++num; 300 ++num;
299 } 301 }
@@ -305,8 +307,21 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
305 return num; 307 return num;
306} 308}
307 309
308static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, 310static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
309 const unsigned char *addr) 311 const unsigned char *addr)
312{
313 struct hlist_node *h;
314 struct net_bridge_fdb_entry *fdb;
315
316 hlist_for_each_entry(fdb, h, head, hlist) {
317 if (!compare_ether_addr(fdb->addr.addr, addr))
318 return fdb;
319 }
320 return NULL;
321}
322
323static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
324 const unsigned char *addr)
310{ 325{
311 struct hlist_node *h; 326 struct hlist_node *h;
312 struct net_bridge_fdb_entry *fdb; 327 struct net_bridge_fdb_entry *fdb;
@@ -320,8 +335,7 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
320 335
321static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, 336static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
322 struct net_bridge_port *source, 337 struct net_bridge_port *source,
323 const unsigned char *addr, 338 const unsigned char *addr)
324 int is_local)
325{ 339{
326 struct net_bridge_fdb_entry *fdb; 340 struct net_bridge_fdb_entry *fdb;
327 341
@@ -329,11 +343,11 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
329 if (fdb) { 343 if (fdb) {
330 memcpy(fdb->addr.addr, addr, ETH_ALEN); 344 memcpy(fdb->addr.addr, addr, ETH_ALEN);
331 fdb->dst = source; 345 fdb->dst = source;
332 fdb->is_local = is_local; 346 fdb->is_local = 0;
333 fdb->is_static = is_local; 347 fdb->is_static = 0;
334 fdb->ageing_timer = jiffies; 348 fdb->updated = fdb->used = jiffies;
335
336 hlist_add_head_rcu(&fdb->hlist, head); 349 hlist_add_head_rcu(&fdb->hlist, head);
350 fdb_notify(fdb, RTM_NEWNEIGH);
337 } 351 }
338 return fdb; 352 return fdb;
339} 353}
@@ -360,12 +374,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
360 fdb_delete(fdb); 374 fdb_delete(fdb);
361 } 375 }
362 376
363 if (!fdb_create(head, source, addr, 1)) 377 fdb = fdb_create(head, source, addr);
378 if (!fdb)
364 return -ENOMEM; 379 return -ENOMEM;
365 380
381 fdb->is_local = fdb->is_static = 1;
366 return 0; 382 return 0;
367} 383}
368 384
385/* Add entry for local address of interface */
369int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 386int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
370 const unsigned char *addr) 387 const unsigned char *addr)
371{ 388{
@@ -392,7 +409,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
392 source->state == BR_STATE_FORWARDING)) 409 source->state == BR_STATE_FORWARDING))
393 return; 410 return;
394 411
395 fdb = fdb_find(head, addr); 412 fdb = fdb_find_rcu(head, addr);
396 if (likely(fdb)) { 413 if (likely(fdb)) {
397 /* attempt to update an entry for a local interface */ 414 /* attempt to update an entry for a local interface */
398 if (unlikely(fdb->is_local)) { 415 if (unlikely(fdb->is_local)) {
@@ -403,15 +420,277 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
403 } else { 420 } else {
404 /* fastpath: update of existing entry */ 421 /* fastpath: update of existing entry */
405 fdb->dst = source; 422 fdb->dst = source;
406 fdb->ageing_timer = jiffies; 423 fdb->updated = jiffies;
407 } 424 }
408 } else { 425 } else {
409 spin_lock(&br->hash_lock); 426 spin_lock(&br->hash_lock);
410 if (!fdb_find(head, addr)) 427 if (likely(!fdb_find(head, addr)))
411 fdb_create(head, source, addr, 0); 428 fdb_create(head, source, addr);
429
412 /* else we lose race and someone else inserts 430 /* else we lose race and someone else inserts
413 * it first, don't bother updating 431 * it first, don't bother updating
414 */ 432 */
415 spin_unlock(&br->hash_lock); 433 spin_unlock(&br->hash_lock);
416 } 434 }
417} 435}
436
437static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
438{
439 if (fdb->is_local)
440 return NUD_PERMANENT;
441 else if (fdb->is_static)
442 return NUD_NOARP;
443 else if (has_expired(fdb->dst->br, fdb))
444 return NUD_STALE;
445 else
446 return NUD_REACHABLE;
447}
448
449static int fdb_fill_info(struct sk_buff *skb,
450 const struct net_bridge_fdb_entry *fdb,
451 u32 pid, u32 seq, int type, unsigned int flags)
452{
453 unsigned long now = jiffies;
454 struct nda_cacheinfo ci;
455 struct nlmsghdr *nlh;
456 struct ndmsg *ndm;
457
458 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
459 if (nlh == NULL)
460 return -EMSGSIZE;
461
462
463 ndm = nlmsg_data(nlh);
464 ndm->ndm_family = AF_BRIDGE;
465 ndm->ndm_pad1 = 0;
466 ndm->ndm_pad2 = 0;
467 ndm->ndm_flags = 0;
468 ndm->ndm_type = 0;
469 ndm->ndm_ifindex = fdb->dst->dev->ifindex;
470 ndm->ndm_state = fdb_to_nud(fdb);
471
472 NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
473
474 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
475 ci.ndm_confirmed = 0;
476 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
477 ci.ndm_refcnt = 0;
478 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
479
480 return nlmsg_end(skb, nlh);
481
482nla_put_failure:
483 nlmsg_cancel(skb, nlh);
484 return -EMSGSIZE;
485}
486
487static inline size_t fdb_nlmsg_size(void)
488{
489 return NLMSG_ALIGN(sizeof(struct ndmsg))
490 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
491 + nla_total_size(sizeof(struct nda_cacheinfo));
492}
493
494static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
495{
496 struct net *net = dev_net(fdb->dst->dev);
497 struct sk_buff *skb;
498 int err = -ENOBUFS;
499
500 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
501 if (skb == NULL)
502 goto errout;
503
504 err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
505 if (err < 0) {
506 /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
507 WARN_ON(err == -EMSGSIZE);
508 kfree_skb(skb);
509 goto errout;
510 }
511 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
512 return;
513errout:
514 if (err < 0)
515 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
516}
517
518/* Dump information about entries, in response to GETNEIGH */
519int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
520{
521 struct net *net = sock_net(skb->sk);
522 struct net_device *dev;
523 int idx = 0;
524
525 rcu_read_lock();
526 for_each_netdev_rcu(net, dev) {
527 struct net_bridge *br = netdev_priv(dev);
528 int i;
529
530 if (!(dev->priv_flags & IFF_EBRIDGE))
531 continue;
532
533 for (i = 0; i < BR_HASH_SIZE; i++) {
534 struct hlist_node *h;
535 struct net_bridge_fdb_entry *f;
536
537 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
538 if (idx < cb->args[0])
539 goto skip;
540
541 if (fdb_fill_info(skb, f,
542 NETLINK_CB(cb->skb).pid,
543 cb->nlh->nlmsg_seq,
544 RTM_NEWNEIGH,
545 NLM_F_MULTI) < 0)
546 break;
547skip:
548 ++idx;
549 }
550 }
551 }
552 rcu_read_unlock();
553
554 cb->args[0] = idx;
555
556 return skb->len;
557}
558
559/* Create new static fdb entry */
560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
561 __u16 state)
562{
563 struct net_bridge *br = source->br;
564 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
565 struct net_bridge_fdb_entry *fdb;
566
567 fdb = fdb_find(head, addr);
568 if (fdb)
569 return -EEXIST;
570
571 fdb = fdb_create(head, source, addr);
572 if (!fdb)
573 return -ENOMEM;
574
575 if (state & NUD_PERMANENT)
576 fdb->is_local = fdb->is_static = 1;
577 else if (state & NUD_NOARP)
578 fdb->is_static = 1;
579 return 0;
580}
581
582/* Add new permanent fdb entry with RTM_NEWNEIGH */
583int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
584{
585 struct net *net = sock_net(skb->sk);
586 struct ndmsg *ndm;
587 struct nlattr *tb[NDA_MAX+1];
588 struct net_device *dev;
589 struct net_bridge_port *p;
590 const __u8 *addr;
591 int err;
592
593 ASSERT_RTNL();
594 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
595 if (err < 0)
596 return err;
597
598 ndm = nlmsg_data(nlh);
599 if (ndm->ndm_ifindex == 0) {
600 pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
601 return -EINVAL;
602 }
603
604 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
605 if (dev == NULL) {
606 pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
607 return -ENODEV;
608 }
609
610 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
611 pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
612 return -EINVAL;
613 }
614
615 addr = nla_data(tb[NDA_LLADDR]);
616 if (!is_valid_ether_addr(addr)) {
617 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
618 return -EINVAL;
619 }
620
621 p = br_port_get_rtnl(dev);
622 if (p == NULL) {
623 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
624 dev->name);
625 return -EINVAL;
626 }
627
628 spin_lock_bh(&p->br->hash_lock);
629 err = fdb_add_entry(p, addr, ndm->ndm_state);
630 spin_unlock_bh(&p->br->hash_lock);
631
632 return err;
633}
634
635static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
636{
637 struct net_bridge *br = p->br;
638 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
639 struct net_bridge_fdb_entry *fdb;
640
641 fdb = fdb_find(head, addr);
642 if (!fdb)
643 return -ENOENT;
644
645 fdb_delete(fdb);
646 return 0;
647}
648
649/* Remove neighbor entry with RTM_DELNEIGH */
650int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
651{
652 struct net *net = sock_net(skb->sk);
653 struct ndmsg *ndm;
654 struct net_bridge_port *p;
655 struct nlattr *llattr;
656 const __u8 *addr;
657 struct net_device *dev;
658 int err;
659
660 ASSERT_RTNL();
661 if (nlmsg_len(nlh) < sizeof(*ndm))
662 return -EINVAL;
663
664 ndm = nlmsg_data(nlh);
665 if (ndm->ndm_ifindex == 0) {
666 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
667 return -EINVAL;
668 }
669
670 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
671 if (dev == NULL) {
672 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
673 return -ENODEV;
674 }
675
676 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
677 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
678 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
679 return -EINVAL;
680 }
681
682 addr = nla_data(llattr);
683
684 p = br_port_get_rtnl(dev);
685 if (p == NULL) {
686 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
687 dev->name);
688 return -EINVAL;
689 }
690
691 spin_lock_bh(&p->br->hash_lock);
692 err = fdb_delete_by_addr(p, addr);
693 spin_unlock_bh(&p->br->hash_lock);
694
695 return err;
696}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 718b60366dfe..5dbdfdfc3a34 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -36,8 +36,8 @@ static int port_cost(struct net_device *dev)
36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { 36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; 37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
38 38
39 if (!dev->ethtool_ops->get_settings(dev, &ecmd)) { 39 if (!dev_ethtool_get_settings(dev, &ecmd)) {
40 switch(ecmd.speed) { 40 switch (ethtool_cmd_speed(&ecmd)) {
41 case SPEED_10000: 41 case SPEED_10000:
42 return 2; 42 return 2;
43 case SPEED_1000: 43 case SPEED_1000:
@@ -175,56 +175,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
175 unregister_netdevice_queue(br->dev, head); 175 unregister_netdevice_queue(br->dev, head);
176} 176}
177 177
178static struct net_device *new_bridge_dev(struct net *net, const char *name)
179{
180 struct net_bridge *br;
181 struct net_device *dev;
182
183 dev = alloc_netdev(sizeof(struct net_bridge), name,
184 br_dev_setup);
185
186 if (!dev)
187 return NULL;
188 dev_net_set(dev, net);
189
190 br = netdev_priv(dev);
191 br->dev = dev;
192
193 br->stats = alloc_percpu(struct br_cpu_netstats);
194 if (!br->stats) {
195 free_netdev(dev);
196 return NULL;
197 }
198
199 spin_lock_init(&br->lock);
200 INIT_LIST_HEAD(&br->port_list);
201 spin_lock_init(&br->hash_lock);
202
203 br->bridge_id.prio[0] = 0x80;
204 br->bridge_id.prio[1] = 0x00;
205
206 memcpy(br->group_addr, br_group_address, ETH_ALEN);
207
208 br->feature_mask = dev->features;
209 br->stp_enabled = BR_NO_STP;
210 br->designated_root = br->bridge_id;
211 br->root_path_cost = 0;
212 br->root_port = 0;
213 br->bridge_max_age = br->max_age = 20 * HZ;
214 br->bridge_hello_time = br->hello_time = 2 * HZ;
215 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
216 br->topology_change = 0;
217 br->topology_change_detected = 0;
218 br->ageing_time = 300 * HZ;
219
220 br_netfilter_rtable_init(br);
221
222 br_stp_timer_init(br);
223 br_multicast_init(br);
224
225 return dev;
226}
227
228/* find an available port number */ 178/* find an available port number */
229static int find_portno(struct net_bridge *br) 179static int find_portno(struct net_bridge *br)
230{ 180{
@@ -277,42 +227,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
277 return p; 227 return p;
278} 228}
279 229
280static struct device_type br_type = {
281 .name = "bridge",
282};
283
284int br_add_bridge(struct net *net, const char *name) 230int br_add_bridge(struct net *net, const char *name)
285{ 231{
286 struct net_device *dev; 232 struct net_device *dev;
287 int ret;
288 233
289 dev = new_bridge_dev(net, name); 234 dev = alloc_netdev(sizeof(struct net_bridge), name,
235 br_dev_setup);
236
290 if (!dev) 237 if (!dev)
291 return -ENOMEM; 238 return -ENOMEM;
292 239
293 rtnl_lock(); 240 dev_net_set(dev, net);
294 if (strchr(dev->name, '%')) {
295 ret = dev_alloc_name(dev, dev->name);
296 if (ret < 0)
297 goto out_free;
298 }
299
300 SET_NETDEV_DEVTYPE(dev, &br_type);
301
302 ret = register_netdevice(dev);
303 if (ret)
304 goto out_free;
305
306 ret = br_sysfs_addbr(dev);
307 if (ret)
308 unregister_netdevice(dev);
309 out:
310 rtnl_unlock();
311 return ret;
312 241
313out_free: 242 return register_netdev(dev);
314 free_netdev(dev);
315 goto out;
316} 243}
317 244
318int br_del_bridge(struct net *net, const char *name) 245int br_del_bridge(struct net *net, const char *name)
@@ -364,15 +291,15 @@ int br_min_mtu(const struct net_bridge *br)
364/* 291/*
365 * Recomputes features using slave's features 292 * Recomputes features using slave's features
366 */ 293 */
367void br_features_recompute(struct net_bridge *br) 294u32 br_features_recompute(struct net_bridge *br, u32 features)
368{ 295{
369 struct net_bridge_port *p; 296 struct net_bridge_port *p;
370 u32 features, mask; 297 u32 mask;
371 298
372 features = mask = br->feature_mask;
373 if (list_empty(&br->port_list)) 299 if (list_empty(&br->port_list))
374 goto done; 300 return features;
375 301
302 mask = features;
376 features &= ~NETIF_F_ONE_FOR_ALL; 303 features &= ~NETIF_F_ONE_FOR_ALL;
377 304
378 list_for_each_entry(p, &br->port_list, list) { 305 list_for_each_entry(p, &br->port_list, list) {
@@ -380,8 +307,7 @@ void br_features_recompute(struct net_bridge *br)
380 p->dev->features, mask); 307 p->dev->features, mask);
381 } 308 }
382 309
383done: 310 return features;
384 br->dev->features = netdev_fix_features(br->dev, features);
385} 311}
386 312
387/* called with RTNL */ 313/* called with RTNL */
@@ -446,9 +372,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
446 372
447 list_add_rcu(&p->list, &br->port_list); 373 list_add_rcu(&p->list, &br->port_list);
448 374
375 netdev_update_features(br->dev);
376
449 spin_lock_bh(&br->lock); 377 spin_lock_bh(&br->lock);
450 changed_addr = br_stp_recalculate_bridge_id(br); 378 changed_addr = br_stp_recalculate_bridge_id(br);
451 br_features_recompute(br);
452 379
453 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && 380 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
454 (br->dev->flags & IFF_UP)) 381 (br->dev->flags & IFF_UP))
@@ -496,9 +423,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
496 423
497 spin_lock_bh(&br->lock); 424 spin_lock_bh(&br->lock);
498 br_stp_recalculate_bridge_id(br); 425 br_stp_recalculate_bridge_id(br);
499 br_features_recompute(br);
500 spin_unlock_bh(&br->lock); 426 spin_unlock_bh(&br->lock);
501 427
428 netdev_update_features(br->dev);
429
502 return 0; 430 return 0;
503} 431}
504 432
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 0c7badad62af..f3ac1e858ee1 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -98,9 +98,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
98 } 98 }
99 99
100 if (skb) { 100 if (skb) {
101 if (dst) 101 if (dst) {
102 dst->used = jiffies;
102 br_forward(dst->dst, skb, skb2); 103 br_forward(dst->dst, skb, skb2);
103 else 104 } else
104 br_flood_forward(br, skb, skb2); 105 br_flood_forward(br, skb, skb2);
105 } 106 }
106 107
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 3d9fca0e3370..7222fe1d5460 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -181,40 +181,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
181 if (!capable(CAP_NET_ADMIN)) 181 if (!capable(CAP_NET_ADMIN))
182 return -EPERM; 182 return -EPERM;
183 183
184 spin_lock_bh(&br->lock); 184 return br_set_forward_delay(br, args[1]);
185 br->bridge_forward_delay = clock_t_to_jiffies(args[1]);
186 if (br_is_root_bridge(br))
187 br->forward_delay = br->bridge_forward_delay;
188 spin_unlock_bh(&br->lock);
189 return 0;
190 185
191 case BRCTL_SET_BRIDGE_HELLO_TIME: 186 case BRCTL_SET_BRIDGE_HELLO_TIME:
192 {
193 unsigned long t = clock_t_to_jiffies(args[1]);
194 if (!capable(CAP_NET_ADMIN)) 187 if (!capable(CAP_NET_ADMIN))
195 return -EPERM; 188 return -EPERM;
196 189
197 if (t < HZ) 190 return br_set_hello_time(br, args[1]);
198 return -EINVAL;
199
200 spin_lock_bh(&br->lock);
201 br->bridge_hello_time = t;
202 if (br_is_root_bridge(br))
203 br->hello_time = br->bridge_hello_time;
204 spin_unlock_bh(&br->lock);
205 return 0;
206 }
207 191
208 case BRCTL_SET_BRIDGE_MAX_AGE: 192 case BRCTL_SET_BRIDGE_MAX_AGE:
209 if (!capable(CAP_NET_ADMIN)) 193 if (!capable(CAP_NET_ADMIN))
210 return -EPERM; 194 return -EPERM;
211 195
212 spin_lock_bh(&br->lock); 196 return br_set_max_age(br, args[1]);
213 br->bridge_max_age = clock_t_to_jiffies(args[1]);
214 if (br_is_root_bridge(br))
215 br->max_age = br->bridge_max_age;
216 spin_unlock_bh(&br->lock);
217 return 0;
218 197
219 case BRCTL_SET_AGEING_TIME: 198 case BRCTL_SET_AGEING_TIME:
220 if (!capable(CAP_NET_ADMIN)) 199 if (!capable(CAP_NET_ADMIN))
@@ -275,19 +254,16 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
275 case BRCTL_SET_PORT_PRIORITY: 254 case BRCTL_SET_PORT_PRIORITY:
276 { 255 {
277 struct net_bridge_port *p; 256 struct net_bridge_port *p;
278 int ret = 0; 257 int ret;
279 258
280 if (!capable(CAP_NET_ADMIN)) 259 if (!capable(CAP_NET_ADMIN))
281 return -EPERM; 260 return -EPERM;
282 261
283 if (args[2] >= (1<<(16-BR_PORT_BITS)))
284 return -ERANGE;
285
286 spin_lock_bh(&br->lock); 262 spin_lock_bh(&br->lock);
287 if ((p = br_get_port(br, args[1])) == NULL) 263 if ((p = br_get_port(br, args[1])) == NULL)
288 ret = -EINVAL; 264 ret = -EINVAL;
289 else 265 else
290 br_stp_set_port_priority(p, args[2]); 266 ret = br_stp_set_port_priority(p, args[2]);
291 spin_unlock_bh(&br->lock); 267 spin_unlock_bh(&br->lock);
292 return ret; 268 return ret;
293 } 269 }
@@ -295,15 +271,17 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
295 case BRCTL_SET_PATH_COST: 271 case BRCTL_SET_PATH_COST:
296 { 272 {
297 struct net_bridge_port *p; 273 struct net_bridge_port *p;
298 int ret = 0; 274 int ret;
299 275
300 if (!capable(CAP_NET_ADMIN)) 276 if (!capable(CAP_NET_ADMIN))
301 return -EPERM; 277 return -EPERM;
302 278
279 spin_lock_bh(&br->lock);
303 if ((p = br_get_port(br, args[1])) == NULL) 280 if ((p = br_get_port(br, args[1])) == NULL)
304 ret = -EINVAL; 281 ret = -EINVAL;
305 else 282 else
306 br_stp_set_path_cost(p, args[2]); 283 ret = br_stp_set_path_cost(p, args[2]);
284 spin_unlock_bh(&br->lock);
307 285
308 return ret; 286 return ret;
309 } 287 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 59660c909a7c..2f14eafdeeab 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -413,7 +413,7 @@ out:
413 413
414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
416 struct in6_addr *group) 416 const struct in6_addr *group)
417{ 417{
418 struct sk_buff *skb; 418 struct sk_buff *skb;
419 struct ipv6hdr *ip6h; 419 struct ipv6hdr *ip6h;
@@ -1115,7 +1115,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1115 struct net_bridge_port *port, 1115 struct net_bridge_port *port,
1116 struct sk_buff *skb) 1116 struct sk_buff *skb)
1117{ 1117{
1118 struct iphdr *iph = ip_hdr(skb); 1118 const struct iphdr *iph = ip_hdr(skb);
1119 struct igmphdr *ih = igmp_hdr(skb); 1119 struct igmphdr *ih = igmp_hdr(skb);
1120 struct net_bridge_mdb_entry *mp; 1120 struct net_bridge_mdb_entry *mp;
1121 struct igmpv3_query *ih3; 1121 struct igmpv3_query *ih3;
@@ -1190,7 +1190,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1190 struct net_bridge_port *port, 1190 struct net_bridge_port *port,
1191 struct sk_buff *skb) 1191 struct sk_buff *skb)
1192{ 1192{
1193 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1193 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1195 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1196 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
@@ -1198,7 +1198,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1198 struct net_bridge_port_group __rcu **pp; 1198 struct net_bridge_port_group __rcu **pp;
1199 unsigned long max_delay; 1199 unsigned long max_delay;
1200 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1201 struct in6_addr *group = NULL; 1201 const struct in6_addr *group = NULL;
1202 int err = 0; 1202 int err = 0;
1203 1203
1204 spin_lock(&br->multicast_lock); 1204 spin_lock(&br->multicast_lock);
@@ -1356,7 +1356,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1356 struct sk_buff *skb) 1356 struct sk_buff *skb)
1357{ 1357{
1358 struct sk_buff *skb2 = skb; 1358 struct sk_buff *skb2 = skb;
1359 struct iphdr *iph; 1359 const struct iphdr *iph;
1360 struct igmphdr *ih; 1360 struct igmphdr *ih;
1361 unsigned len; 1361 unsigned len;
1362 unsigned offset; 1362 unsigned offset;
@@ -1452,7 +1452,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1452 struct sk_buff *skb) 1452 struct sk_buff *skb)
1453{ 1453{
1454 struct sk_buff *skb2; 1454 struct sk_buff *skb2;
1455 struct ipv6hdr *ip6h; 1455 const struct ipv6hdr *ip6h;
1456 struct icmp6hdr *icmp6h; 1456 struct icmp6hdr *icmp6h;
1457 u8 nexthdr; 1457 u8 nexthdr;
1458 unsigned len; 1458 unsigned len;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f3bc322c5891..5614907525e1 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -219,7 +219,7 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
219static int br_parse_ip_options(struct sk_buff *skb) 219static int br_parse_ip_options(struct sk_buff *skb)
220{ 220{
221 struct ip_options *opt; 221 struct ip_options *opt;
222 struct iphdr *iph; 222 const struct iphdr *iph;
223 struct net_device *dev = skb->dev; 223 struct net_device *dev = skb->dev;
224 u32 len; 224 u32 len;
225 225
@@ -554,7 +554,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
554 const struct net_device *out, 554 const struct net_device *out,
555 int (*okfn)(struct sk_buff *)) 555 int (*okfn)(struct sk_buff *))
556{ 556{
557 struct ipv6hdr *hdr; 557 const struct ipv6hdr *hdr;
558 u32 pkt_len; 558 u32 pkt_len;
559 559
560 if (skb->len < sizeof(struct ipv6hdr)) 560 if (skb->len < sizeof(struct ipv6hdr))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f8bf4c7f842c..ffb0dc4cc0e8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -12,9 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/etherdevice.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/sock.h> 18#include <net/sock.h>
19
18#include "br_private.h" 20#include "br_private.h"
19 21
20static inline size_t br_nlmsg_size(void) 22static inline size_t br_nlmsg_size(void)
@@ -118,8 +120,9 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
118 int idx; 120 int idx;
119 121
120 idx = 0; 122 idx = 0;
121 for_each_netdev(net, dev) { 123 rcu_read_lock();
122 struct net_bridge_port *port = br_port_get_rtnl(dev); 124 for_each_netdev_rcu(net, dev) {
125 struct net_bridge_port *port = br_port_get_rcu(dev);
123 126
124 /* not a bridge port */ 127 /* not a bridge port */
125 if (!port || idx < cb->args[0]) 128 if (!port || idx < cb->args[0])
@@ -133,7 +136,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
133skip: 136skip:
134 ++idx; 137 ++idx;
135 } 138 }
136 139 rcu_read_unlock();
137 cb->args[0] = idx; 140 cb->args[0] = idx;
138 141
139 return skb->len; 142 return skb->len;
@@ -188,20 +191,61 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
188 return 0; 191 return 0;
189} 192}
190 193
194static int br_validate(struct nlattr *tb[], struct nlattr *data[])
195{
196 if (tb[IFLA_ADDRESS]) {
197 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
198 return -EINVAL;
199 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
200 return -EADDRNOTAVAIL;
201 }
202
203 return 0;
204}
205
206static struct rtnl_link_ops br_link_ops __read_mostly = {
207 .kind = "bridge",
208 .priv_size = sizeof(struct net_bridge),
209 .setup = br_dev_setup,
210 .validate = br_validate,
211};
191 212
192int __init br_netlink_init(void) 213int __init br_netlink_init(void)
193{ 214{
194 if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo)) 215 int err;
195 return -ENOBUFS;
196 216
197 /* Only the first call to __rtnl_register can fail */ 217 err = rtnl_link_register(&br_link_ops);
198 __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 218 if (err < 0)
219 goto err1;
220
221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
222 if (err)
223 goto err2;
224 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
225 if (err)
226 goto err3;
227 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
228 if (err)
229 goto err3;
230 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
231 if (err)
232 goto err3;
233 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
234 if (err)
235 goto err3;
199 236
200 return 0; 237 return 0;
238
239err3:
240 rtnl_unregister_all(PF_BRIDGE);
241err2:
242 rtnl_link_unregister(&br_link_ops);
243err1:
244 return err;
201} 245}
202 246
203void __exit br_netlink_fini(void) 247void __exit br_netlink_fini(void)
204{ 248{
249 rtnl_link_unregister(&br_link_ops);
205 rtnl_unregister_all(PF_BRIDGE); 250 rtnl_unregister_all(PF_BRIDGE);
206} 251}
207
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 7d337c9b6082..6545ee9591d1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -36,6 +36,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
39 /* register of bridge completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
39 /* not a port of a bridge */ 45 /* not a port of a bridge */
40 p = br_port_get_rtnl(dev); 46 p = br_port_get_rtnl(dev);
41 if (!p) 47 if (!p)
@@ -60,10 +66,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
60 break; 66 break;
61 67
62 case NETDEV_FEAT_CHANGE: 68 case NETDEV_FEAT_CHANGE:
63 spin_lock_bh(&br->lock); 69 netdev_update_features(br->dev);
64 if (netif_running(br->dev))
65 br_features_recompute(br);
66 spin_unlock_bh(&br->lock);
67 break; 70 break;
68 71
69 case NETDEV_DOWN: 72 case NETDEV_DOWN:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 387013d33745..54578f274d85 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -64,7 +64,8 @@ struct net_bridge_fdb_entry
64 struct net_bridge_port *dst; 64 struct net_bridge_port *dst;
65 65
66 struct rcu_head rcu; 66 struct rcu_head rcu;
67 unsigned long ageing_timer; 67 unsigned long updated;
68 unsigned long used;
68 mac_addr addr; 69 mac_addr addr;
69 unsigned char is_local; 70 unsigned char is_local;
70 unsigned char is_static; 71 unsigned char is_static;
@@ -182,7 +183,6 @@ struct net_bridge
182 struct br_cpu_netstats __percpu *stats; 183 struct br_cpu_netstats __percpu *stats;
183 spinlock_t hash_lock; 184 spinlock_t hash_lock;
184 struct hlist_head hash[BR_HASH_SIZE]; 185 struct hlist_head hash[BR_HASH_SIZE];
185 u32 feature_mask;
186#ifdef CONFIG_BRIDGE_NETFILTER 186#ifdef CONFIG_BRIDGE_NETFILTER
187 struct rtable fake_rtable; 187 struct rtable fake_rtable;
188 bool nf_call_iptables; 188 bool nf_call_iptables;
@@ -353,6 +353,9 @@ extern int br_fdb_insert(struct net_bridge *br,
353extern void br_fdb_update(struct net_bridge *br, 353extern void br_fdb_update(struct net_bridge *br,
354 struct net_bridge_port *source, 354 struct net_bridge_port *source,
355 const unsigned char *addr); 355 const unsigned char *addr);
356extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
357extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
358extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
356 359
357/* br_forward.c */ 360/* br_forward.c */
358extern void br_deliver(const struct net_bridge_port *to, 361extern void br_deliver(const struct net_bridge_port *to,
@@ -375,7 +378,7 @@ extern int br_add_if(struct net_bridge *br,
375extern int br_del_if(struct net_bridge *br, 378extern int br_del_if(struct net_bridge *br,
376 struct net_device *dev); 379 struct net_device *dev);
377extern int br_min_mtu(const struct net_bridge *br); 380extern int br_min_mtu(const struct net_bridge *br);
378extern void br_features_recompute(struct net_bridge *br); 381extern u32 br_features_recompute(struct net_bridge *br, u32 features);
379 382
380/* br_input.c */ 383/* br_input.c */
381extern int br_handle_frame_finish(struct sk_buff *skb); 384extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -491,6 +494,11 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
491extern void br_init_port(struct net_bridge_port *p); 494extern void br_init_port(struct net_bridge_port *p);
492extern void br_become_designated_port(struct net_bridge_port *p); 495extern void br_become_designated_port(struct net_bridge_port *p);
493 496
497extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
498extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
499extern int br_set_max_age(struct net_bridge *br, unsigned long x);
500
501
494/* br_stp_if.c */ 502/* br_stp_if.c */
495extern void br_stp_enable_bridge(struct net_bridge *br); 503extern void br_stp_enable_bridge(struct net_bridge *br);
496extern void br_stp_disable_bridge(struct net_bridge *br); 504extern void br_stp_disable_bridge(struct net_bridge *br);
@@ -501,10 +509,10 @@ extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
501extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 509extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
502extern void br_stp_set_bridge_priority(struct net_bridge *br, 510extern void br_stp_set_bridge_priority(struct net_bridge *br,
503 u16 newprio); 511 u16 newprio);
504extern void br_stp_set_port_priority(struct net_bridge_port *p, 512extern int br_stp_set_port_priority(struct net_bridge_port *p,
505 u8 newprio); 513 unsigned long newprio);
506extern void br_stp_set_path_cost(struct net_bridge_port *p, 514extern int br_stp_set_path_cost(struct net_bridge_port *p,
507 u32 path_cost); 515 unsigned long path_cost);
508extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 516extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
509 517
510/* br_stp_bpdu.c */ 518/* br_stp_bpdu.c */
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 8b650f7fbfa0..642ef47a867e 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -16,6 +16,19 @@
16#define BPDU_TYPE_CONFIG 0 16#define BPDU_TYPE_CONFIG 0
17#define BPDU_TYPE_TCN 0x80 17#define BPDU_TYPE_TCN 0x80
18 18
19/* IEEE 802.1D-1998 timer values */
20#define BR_MIN_HELLO_TIME (1*HZ)
21#define BR_MAX_HELLO_TIME (10*HZ)
22
23#define BR_MIN_FORWARD_DELAY (2*HZ)
24#define BR_MAX_FORWARD_DELAY (30*HZ)
25
26#define BR_MIN_MAX_AGE (6*HZ)
27#define BR_MAX_MAX_AGE (40*HZ)
28
29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535
31
19struct br_config_bpdu 32struct br_config_bpdu
20{ 33{
21 unsigned topology_change:1; 34 unsigned topology_change:1;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 7370d14f634d..bb4383e84de9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -484,3 +484,51 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
485 } 485 }
486} 486}
487
488/* Change bridge STP parameter */
489int br_set_hello_time(struct net_bridge *br, unsigned long val)
490{
491 unsigned long t = clock_t_to_jiffies(val);
492
493 if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
494 return -ERANGE;
495
496 spin_lock_bh(&br->lock);
497 br->bridge_hello_time = t;
498 if (br_is_root_bridge(br))
499 br->hello_time = br->bridge_hello_time;
500 spin_unlock_bh(&br->lock);
501 return 0;
502}
503
504int br_set_max_age(struct net_bridge *br, unsigned long val)
505{
506 unsigned long t = clock_t_to_jiffies(val);
507
508 if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
509 return -ERANGE;
510
511 spin_lock_bh(&br->lock);
512 br->bridge_max_age = t;
513 if (br_is_root_bridge(br))
514 br->max_age = br->bridge_max_age;
515 spin_unlock_bh(&br->lock);
516 return 0;
517
518}
519
520int br_set_forward_delay(struct net_bridge *br, unsigned long val)
521{
522 unsigned long t = clock_t_to_jiffies(val);
523
524 if (br->stp_enabled != BR_NO_STP &&
525 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
526 return -ERANGE;
527
528 spin_lock_bh(&br->lock);
529 br->bridge_forward_delay = t;
530 if (br_is_root_bridge(br))
531 br->forward_delay = br->bridge_forward_delay;
532 spin_unlock_bh(&br->lock);
533 return 0;
534}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9b61d09de9b9..6f615b8192f4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -20,7 +20,7 @@
20 20
21 21
22/* Port id is composed of priority and port number. 22/* Port id is composed of priority and port number.
23 * NB: least significant bits of priority are dropped to 23 * NB: some bits of priority are dropped to
24 * make room for more ports. 24 * make room for more ports.
25 */ 25 */
26static inline port_id br_make_port_id(__u8 priority, __u16 port_no) 26static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
@@ -29,6 +29,8 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
29 | (port_no & ((1<<BR_PORT_BITS)-1)); 29 | (port_no & ((1<<BR_PORT_BITS)-1));
30} 30}
31 31
32#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
33
32/* called under bridge lock */ 34/* called under bridge lock */
33void br_init_port(struct net_bridge_port *p) 35void br_init_port(struct net_bridge_port *p)
34{ 36{
@@ -255,10 +257,14 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
255} 257}
256 258
257/* called under bridge lock */ 259/* called under bridge lock */
258void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio) 260int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
259{ 261{
260 port_id new_port_id = br_make_port_id(newprio, p->port_no); 262 port_id new_port_id;
263
264 if (newprio > BR_MAX_PORT_PRIORITY)
265 return -ERANGE;
261 266
267 new_port_id = br_make_port_id(newprio, p->port_no);
262 if (br_is_designated_port(p)) 268 if (br_is_designated_port(p))
263 p->designated_port = new_port_id; 269 p->designated_port = new_port_id;
264 270
@@ -269,14 +275,21 @@ void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
269 br_become_designated_port(p); 275 br_become_designated_port(p);
270 br_port_state_selection(p->br); 276 br_port_state_selection(p->br);
271 } 277 }
278
279 return 0;
272} 280}
273 281
274/* called under bridge lock */ 282/* called under bridge lock */
275void br_stp_set_path_cost(struct net_bridge_port *p, u32 path_cost) 283int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
276{ 284{
285 if (path_cost < BR_MIN_PATH_COST ||
286 path_cost > BR_MAX_PATH_COST)
287 return -ERANGE;
288
277 p->path_cost = path_cost; 289 p->path_cost = path_cost;
278 br_configuration_update(p->br); 290 br_configuration_update(p->br);
279 br_port_state_selection(p->br); 291 br_port_state_selection(p->br);
292 return 0;
280} 293}
281 294
282ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id) 295ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5c1e5559ebba..68b893ea8c3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,9 +43,7 @@ static ssize_t store_bridge_parm(struct device *d,
43 if (endp == buf) 43 if (endp == buf)
44 return -EINVAL; 44 return -EINVAL;
45 45
46 spin_lock_bh(&br->lock);
47 err = (*set)(br, val); 46 err = (*set)(br, val);
48 spin_unlock_bh(&br->lock);
49 return err ? err : len; 47 return err ? err : len;
50} 48}
51 49
@@ -57,20 +55,11 @@ static ssize_t show_forward_delay(struct device *d,
57 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 55 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
58} 56}
59 57
60static int set_forward_delay(struct net_bridge *br, unsigned long val)
61{
62 unsigned long delay = clock_t_to_jiffies(val);
63 br->forward_delay = delay;
64 if (br_is_root_bridge(br))
65 br->bridge_forward_delay = delay;
66 return 0;
67}
68
69static ssize_t store_forward_delay(struct device *d, 58static ssize_t store_forward_delay(struct device *d,
70 struct device_attribute *attr, 59 struct device_attribute *attr,
71 const char *buf, size_t len) 60 const char *buf, size_t len)
72{ 61{
73 return store_bridge_parm(d, buf, len, set_forward_delay); 62 return store_bridge_parm(d, buf, len, br_set_forward_delay);
74} 63}
75static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, 64static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
76 show_forward_delay, store_forward_delay); 65 show_forward_delay, store_forward_delay);
@@ -82,24 +71,11 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
82 jiffies_to_clock_t(to_bridge(d)->hello_time)); 71 jiffies_to_clock_t(to_bridge(d)->hello_time));
83} 72}
84 73
85static int set_hello_time(struct net_bridge *br, unsigned long val)
86{
87 unsigned long t = clock_t_to_jiffies(val);
88
89 if (t < HZ)
90 return -EINVAL;
91
92 br->hello_time = t;
93 if (br_is_root_bridge(br))
94 br->bridge_hello_time = t;
95 return 0;
96}
97
98static ssize_t store_hello_time(struct device *d, 74static ssize_t store_hello_time(struct device *d,
99 struct device_attribute *attr, const char *buf, 75 struct device_attribute *attr, const char *buf,
100 size_t len) 76 size_t len)
101{ 77{
102 return store_bridge_parm(d, buf, len, set_hello_time); 78 return store_bridge_parm(d, buf, len, br_set_hello_time);
103} 79}
104static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, 80static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
105 store_hello_time); 81 store_hello_time);
@@ -111,19 +87,10 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
111 jiffies_to_clock_t(to_bridge(d)->max_age)); 87 jiffies_to_clock_t(to_bridge(d)->max_age));
112} 88}
113 89
114static int set_max_age(struct net_bridge *br, unsigned long val)
115{
116 unsigned long t = clock_t_to_jiffies(val);
117 br->max_age = t;
118 if (br_is_root_bridge(br))
119 br->bridge_max_age = t;
120 return 0;
121}
122
123static ssize_t store_max_age(struct device *d, struct device_attribute *attr, 90static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
124 const char *buf, size_t len) 91 const char *buf, size_t len)
125{ 92{
126 return store_bridge_parm(d, buf, len, set_max_age); 93 return store_bridge_parm(d, buf, len, br_set_max_age);
127} 94}
128static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); 95static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
129 96
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index fd5799c9bc8d..6229b62749e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -23,7 +23,7 @@
23struct brport_attribute { 23struct brport_attribute {
24 struct attribute attr; 24 struct attribute attr;
25 ssize_t (*show)(struct net_bridge_port *, char *); 25 ssize_t (*show)(struct net_bridge_port *, char *);
26 ssize_t (*store)(struct net_bridge_port *, unsigned long); 26 int (*store)(struct net_bridge_port *, unsigned long);
27}; 27};
28 28
29#define BRPORT_ATTR(_name,_mode,_show,_store) \ 29#define BRPORT_ATTR(_name,_mode,_show,_store) \
@@ -38,27 +38,17 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
38{ 38{
39 return sprintf(buf, "%d\n", p->path_cost); 39 return sprintf(buf, "%d\n", p->path_cost);
40} 40}
41static ssize_t store_path_cost(struct net_bridge_port *p, unsigned long v) 41
42{
43 br_stp_set_path_cost(p, v);
44 return 0;
45}
46static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR, 42static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
47 show_path_cost, store_path_cost); 43 show_path_cost, br_stp_set_path_cost);
48 44
49static ssize_t show_priority(struct net_bridge_port *p, char *buf) 45static ssize_t show_priority(struct net_bridge_port *p, char *buf)
50{ 46{
51 return sprintf(buf, "%d\n", p->priority); 47 return sprintf(buf, "%d\n", p->priority);
52} 48}
53static ssize_t store_priority(struct net_bridge_port *p, unsigned long v) 49
54{
55 if (v >= (1<<(16-BR_PORT_BITS)))
56 return -ERANGE;
57 br_stp_set_port_priority(p, v);
58 return 0;
59}
60static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR, 50static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
61 show_priority, store_priority); 51 show_priority, br_stp_set_port_priority);
62 52
63static ssize_t show_designated_root(struct net_bridge_port *p, char *buf) 53static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
64{ 54{
@@ -136,7 +126,7 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
136} 126}
137static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL); 127static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
138 128
139static ssize_t store_flush(struct net_bridge_port *p, unsigned long v) 129static int store_flush(struct net_bridge_port *p, unsigned long v)
140{ 130{
141 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry 131 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
142 return 0; 132 return 0;
@@ -148,7 +138,7 @@ static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
148 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; 138 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
149 return sprintf(buf, "%d\n", hairpin_mode); 139 return sprintf(buf, "%d\n", hairpin_mode);
150} 140}
151static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v) 141static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
152{ 142{
153 if (v) 143 if (v)
154 p->flags |= BR_HAIRPIN_MODE; 144 p->flags |= BR_HAIRPIN_MODE;
@@ -165,7 +155,7 @@ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
165 return sprintf(buf, "%d\n", p->multicast_router); 155 return sprintf(buf, "%d\n", p->multicast_router);
166} 156}
167 157
168static ssize_t store_multicast_router(struct net_bridge_port *p, 158static int store_multicast_router(struct net_bridge_port *p,
169 unsigned long v) 159 unsigned long v)
170{ 160{
171 return br_multicast_set_port_router(p, v); 161 return br_multicast_set_port_router(p, v);
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index d522d8c1703e..9b63e4e3910e 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -10,9 +10,9 @@
10#include <net/caif/cfcnfg.h> 10#include <net/caif/cfcnfg.h>
11#include <net/caif/caif_dev.h> 11#include <net/caif/caif_dev.h>
12 12
13int connect_req_to_link_param(struct cfcnfg *cnfg, 13int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
14 struct caif_connect_request *s, 14 struct caif_connect_request *s,
15 struct cfctrl_link_param *l) 15 struct cfctrl_link_param *l)
16{ 16{
17 struct dev_info *dev_info; 17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref; 18 enum cfcnfg_phy_preference pref;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index a42a408306e4..75e00d59eb49 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -120,25 +120,12 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
120{ 120{
121 struct caif_device_entry *caifd = 121 struct caif_device_entry *caifd =
122 container_of(layer, struct caif_device_entry, layer); 122 container_of(layer, struct caif_device_entry, layer);
123 struct sk_buff *skb, *skb2; 123 struct sk_buff *skb;
124 int ret = -EINVAL; 124
125 skb = cfpkt_tonative(pkt); 125 skb = cfpkt_tonative(pkt);
126 skb->dev = caifd->netdev; 126 skb->dev = caifd->netdev;
127 /* 127
128 * Don't allow SKB to be destroyed upon error, but signal resend 128 dev_queue_xmit(skb);
129 * notification to clients. We can't rely on the return value as
130 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
131 */
132 if (netif_queue_stopped(caifd->netdev))
133 return -EAGAIN;
134 skb2 = skb_get(skb);
135
136 ret = dev_queue_xmit(skb2);
137
138 if (!ret)
139 kfree_skb(skb);
140 else
141 return -EAGAIN;
142 129
143 return 0; 130 return 0;
144} 131}
@@ -146,9 +133,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
146static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) 133static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
147{ 134{
148 struct caif_device_entry *caifd; 135 struct caif_device_entry *caifd;
149 struct caif_dev_common *caifdev;
150 caifd = container_of(layr, struct caif_device_entry, layer); 136 caifd = container_of(layr, struct caif_device_entry, layer);
151 caifdev = netdev_priv(caifd->netdev);
152 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) { 137 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
153 atomic_set(&caifd->in_use, 1); 138 atomic_set(&caifd->in_use, 1);
154 wake_up_interruptible(&caifd->event); 139 wake_up_interruptible(&caifd->event);
@@ -167,10 +152,8 @@ static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
167static int receive(struct sk_buff *skb, struct net_device *dev, 152static int receive(struct sk_buff *skb, struct net_device *dev,
168 struct packet_type *pkttype, struct net_device *orig_dev) 153 struct packet_type *pkttype, struct net_device *orig_dev)
169{ 154{
170 struct net *net;
171 struct cfpkt *pkt; 155 struct cfpkt *pkt;
172 struct caif_device_entry *caifd; 156 struct caif_device_entry *caifd;
173 net = dev_net(dev);
174 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 157 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
175 caifd = caif_get(dev); 158 caifd = caif_get(dev);
176 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive) 159 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
@@ -208,7 +191,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
208 struct caif_device_entry *caifd = NULL; 191 struct caif_device_entry *caifd = NULL;
209 struct caif_dev_common *caifdev; 192 struct caif_dev_common *caifdev;
210 enum cfcnfg_phy_preference pref; 193 enum cfcnfg_phy_preference pref;
211 int res = -EINVAL;
212 enum cfcnfg_phy_type phy_type; 194 enum cfcnfg_phy_type phy_type;
213 195
214 if (dev->type != ARPHRD_CAIF) 196 if (dev->type != ARPHRD_CAIF)
@@ -223,7 +205,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
223 caifdev = netdev_priv(dev); 205 caifdev = netdev_priv(dev);
224 caifdev->flowctrl = dev_flowctrl; 206 caifdev->flowctrl = dev_flowctrl;
225 atomic_set(&caifd->state, what); 207 atomic_set(&caifd->state, what);
226 res = 0;
227 break; 208 break;
228 209
229 case NETDEV_UP: 210 case NETDEV_UP:
@@ -257,7 +238,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
257 break; 238 break;
258 } 239 }
259 dev_hold(dev); 240 dev_hold(dev);
260 cfcnfg_add_phy_layer(get_caif_conf(), 241 cfcnfg_add_phy_layer(cfg,
261 phy_type, 242 phy_type,
262 dev, 243 dev,
263 &caifd->layer, 244 &caifd->layer,
@@ -287,7 +268,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
287 _CAIF_CTRLCMD_PHYIF_DOWN_IND, 268 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
288 caifd->layer.id); 269 caifd->layer.id);
289 might_sleep(); 270 might_sleep();
290 res = wait_event_interruptible_timeout(caifd->event, 271 wait_event_interruptible_timeout(caifd->event,
291 atomic_read(&caifd->in_use) == 0, 272 atomic_read(&caifd->in_use) == 0,
292 TIMEOUT); 273 TIMEOUT);
293 break; 274 break;
@@ -300,7 +281,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
300 if (atomic_read(&caifd->in_use)) 281 if (atomic_read(&caifd->in_use))
301 netdev_warn(dev, 282 netdev_warn(dev,
302 "Unregistering an active CAIF device\n"); 283 "Unregistering an active CAIF device\n");
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); 284 cfcnfg_del_phy_layer(cfg, &caifd->layer);
304 dev_put(dev); 285 dev_put(dev);
305 atomic_set(&caifd->state, what); 286 atomic_set(&caifd->state, what);
306 break; 287 break;
@@ -322,24 +303,18 @@ static struct notifier_block caif_device_notifier = {
322 .priority = 0, 303 .priority = 0,
323}; 304};
324 305
325
326struct cfcnfg *get_caif_conf(void)
327{
328 return cfg;
329}
330EXPORT_SYMBOL(get_caif_conf);
331
332int caif_connect_client(struct caif_connect_request *conn_req, 306int caif_connect_client(struct caif_connect_request *conn_req,
333 struct cflayer *client_layer, int *ifindex, 307 struct cflayer *client_layer, int *ifindex,
334 int *headroom, int *tailroom) 308 int *headroom, int *tailroom)
335{ 309{
336 struct cfctrl_link_param param; 310 struct cfctrl_link_param param;
337 int ret; 311 int ret;
338 ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param); 312
313 ret = caif_connect_req_to_link_param(cfg, conn_req, &param);
339 if (ret) 314 if (ret)
340 return ret; 315 return ret;
341 /* Hook up the adaptation layer. */ 316 /* Hook up the adaptation layer. */
342 return cfcnfg_add_adaptation_layer(get_caif_conf(), &param, 317 return cfcnfg_add_adaptation_layer(cfg, &param,
343 client_layer, ifindex, 318 client_layer, ifindex,
344 headroom, tailroom); 319 headroom, tailroom);
345} 320}
@@ -347,16 +322,10 @@ EXPORT_SYMBOL(caif_connect_client);
347 322
348int caif_disconnect_client(struct cflayer *adap_layer) 323int caif_disconnect_client(struct cflayer *adap_layer)
349{ 324{
350 return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer); 325 return cfcnfg_disconn_adapt_layer(cfg, adap_layer);
351} 326}
352EXPORT_SYMBOL(caif_disconnect_client); 327EXPORT_SYMBOL(caif_disconnect_client);
353 328
354void caif_release_client(struct cflayer *adap_layer)
355{
356 cfcnfg_release_adap_layer(adap_layer);
357}
358EXPORT_SYMBOL(caif_release_client);
359
360/* Per-namespace Caif devices handling */ 329/* Per-namespace Caif devices handling */
361static int caif_init_net(struct net *net) 330static int caif_init_net(struct net *net)
362{ 331{
@@ -369,12 +338,11 @@ static int caif_init_net(struct net *net)
369static void caif_exit_net(struct net *net) 338static void caif_exit_net(struct net *net)
370{ 339{
371 struct net_device *dev; 340 struct net_device *dev;
372 int res;
373 rtnl_lock(); 341 rtnl_lock();
374 for_each_netdev(net, dev) { 342 for_each_netdev(net, dev) {
375 if (dev->type != ARPHRD_CAIF) 343 if (dev->type != ARPHRD_CAIF)
376 continue; 344 continue;
377 res = dev_close(dev); 345 dev_close(dev);
378 caif_device_destroy(dev); 346 caif_device_destroy(dev);
379 } 347 }
380 rtnl_unlock(); 348 rtnl_unlock();
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 37a4034dfc29..20212424e2e8 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -519,43 +519,14 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
519 int noblock, long timeo) 519 int noblock, long timeo)
520{ 520{
521 struct cfpkt *pkt; 521 struct cfpkt *pkt;
522 int ret, loopcnt = 0;
523 522
524 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 523 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
525 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info)); 524 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
526 do {
527
528 ret = -ETIMEDOUT;
529 525
530 /* Slight paranoia, probably not needed. */ 526 if (cf_sk->layer.dn == NULL)
531 if (unlikely(loopcnt++ > 1000)) { 527 return -EINVAL;
532 pr_warn("transmit retries failed, error = %d\n", ret);
533 break;
534 }
535 528
536 if (cf_sk->layer.dn != NULL) 529 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
537 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
538 if (likely(ret >= 0))
539 break;
540 /* if transmit return -EAGAIN, then retry */
541 if (noblock && ret == -EAGAIN)
542 break;
543 timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
544 if (signal_pending(current)) {
545 ret = sock_intr_errno(timeo);
546 break;
547 }
548 if (ret)
549 break;
550 if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
551 sock_flag(&cf_sk->sk, SOCK_DEAD) ||
552 (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
553 ret = -EPIPE;
554 cf_sk->sk.sk_err = EPIPE;
555 break;
556 }
557 } while (ret == -EAGAIN);
558 return ret;
559} 530}
560 531
561/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ 532/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index f1f98d967d8a..25c0b198e285 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -253,7 +253,7 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
253{ 253{
254} 254}
255 255
256int protohead[CFCTRL_SRV_MASK] = { 256static const int protohead[CFCTRL_SRV_MASK] = {
257 [CFCTRL_SRV_VEI] = 4, 257 [CFCTRL_SRV_VEI] = 4,
258 [CFCTRL_SRV_DATAGRAM] = 7, 258 [CFCTRL_SRV_DATAGRAM] = 7,
259 [CFCTRL_SRV_UTIL] = 4, 259 [CFCTRL_SRV_UTIL] = 4,
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 3cd8f978e309..397a2c099e2c 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -58,7 +58,8 @@ struct cflayer *cfctrl_create(void)
58 return &this->serv.layer; 58 return &this->serv.layer;
59} 59}
60 60
61static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) 61static bool param_eq(const struct cfctrl_link_param *p1,
62 const struct cfctrl_link_param *p2)
62{ 63{
63 bool eq = 64 bool eq =
64 p1->linktype == p2->linktype && 65 p1->linktype == p2->linktype &&
@@ -100,8 +101,8 @@ static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
100 return false; 101 return false;
101} 102}
102 103
103bool cfctrl_req_eq(struct cfctrl_request_info *r1, 104static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
104 struct cfctrl_request_info *r2) 105 const struct cfctrl_request_info *r2)
105{ 106{
106 if (r1->cmd != r2->cmd) 107 if (r1->cmd != r2->cmd)
107 return false; 108 return false;
@@ -112,7 +113,7 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
112} 113}
113 114
114/* Insert request at the end */ 115/* Insert request at the end */
115void cfctrl_insert_req(struct cfctrl *ctrl, 116static void cfctrl_insert_req(struct cfctrl *ctrl,
116 struct cfctrl_request_info *req) 117 struct cfctrl_request_info *req)
117{ 118{
118 spin_lock(&ctrl->info_list_lock); 119 spin_lock(&ctrl->info_list_lock);
@@ -123,8 +124,8 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
123} 124}
124 125
125/* Compare and remove request */ 126/* Compare and remove request */
126struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 127static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
127 struct cfctrl_request_info *req) 128 struct cfctrl_request_info *req)
128{ 129{
129 struct cfctrl_request_info *p, *tmp, *first; 130 struct cfctrl_request_info *p, *tmp, *first;
130 131
@@ -154,16 +155,6 @@ struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
154 return &this->res; 155 return &this->res;
155} 156}
156 157
157void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
158{
159 this->dn = dn;
160}
161
162void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
163{
164 this->up = up;
165}
166
167static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) 158static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
168{ 159{
169 info->hdr_len = 0; 160 info->hdr_len = 0;
@@ -304,58 +295,6 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
304 return ret; 295 return ret;
305} 296}
306 297
307void cfctrl_sleep_req(struct cflayer *layer)
308{
309 int ret;
310 struct cfctrl *cfctrl = container_obj(layer);
311 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
312 if (!pkt) {
313 pr_warn("Out of memory\n");
314 return;
315 }
316 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
317 init_info(cfpkt_info(pkt), cfctrl);
318 ret =
319 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
320 if (ret < 0)
321 cfpkt_destroy(pkt);
322}
323
324void cfctrl_wake_req(struct cflayer *layer)
325{
326 int ret;
327 struct cfctrl *cfctrl = container_obj(layer);
328 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
329 if (!pkt) {
330 pr_warn("Out of memory\n");
331 return;
332 }
333 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
334 init_info(cfpkt_info(pkt), cfctrl);
335 ret =
336 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
337 if (ret < 0)
338 cfpkt_destroy(pkt);
339}
340
341void cfctrl_getstartreason_req(struct cflayer *layer)
342{
343 int ret;
344 struct cfctrl *cfctrl = container_obj(layer);
345 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
346 if (!pkt) {
347 pr_warn("Out of memory\n");
348 return;
349 }
350 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
351 init_info(cfpkt_info(pkt), cfctrl);
352 ret =
353 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
354 if (ret < 0)
355 cfpkt_destroy(pkt);
356}
357
358
359void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 298void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
360{ 299{
361 struct cfctrl_request_info *p, *tmp; 300 struct cfctrl_request_info *p, *tmp;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 054fdb5aeb88..0382dec84fdc 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -108,10 +108,5 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
108 */ 108 */
109 info->hdr_len = 4; 109 info->hdr_len = 4;
110 info->dev_info = &service->dev_info; 110 info->dev_info = &service->dev_info;
111 ret = layr->dn->transmit(layr->dn, pkt); 111 return layr->dn->transmit(layr->dn, pkt);
112 if (ret < 0) {
113 u32 tmp32;
114 cfpkt_extr_head(pkt, &tmp32, 4);
115 }
116 return ret;
117} 112}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index a445043931ae..2423fed8e26c 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -120,7 +120,6 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
120 int tmp; 120 int tmp;
121 u16 chks; 121 u16 chks;
122 u16 len; 122 u16 len;
123 int ret;
124 struct cffrml *this = container_obj(layr); 123 struct cffrml *this = container_obj(layr);
125 if (this->dofcs) { 124 if (this->dofcs) {
126 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 125 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
@@ -137,12 +136,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
137 pr_err("Packet is erroneous!\n"); 136 pr_err("Packet is erroneous!\n");
138 return -EPROTO; 137 return -EPROTO;
139 } 138 }
140 ret = layr->dn->transmit(layr->dn, pkt); 139 return layr->dn->transmit(layr->dn, pkt);
141 if (ret < 0) {
142 /* Remove header on faulty packet. */
143 cfpkt_extr_head(pkt, &tmp, 2);
144 }
145 return ret;
146} 140}
147 141
148static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 142static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 24f1ffa74b06..fc2497468571 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -71,41 +71,6 @@ int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
71 return 0; 71 return 0;
72} 72}
73 73
74bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
75{
76 struct list_head *node;
77 struct cflayer *layer;
78 struct cfmuxl *muxl = container_obj(layr);
79 bool match = false;
80 spin_lock(&muxl->receive_lock);
81
82 list_for_each(node, &muxl->srvl_list) {
83 layer = list_entry(node, struct cflayer, node);
84 if (cfsrvl_phyid_match(layer, phyid)) {
85 match = true;
86 break;
87 }
88
89 }
90 spin_unlock(&muxl->receive_lock);
91 return match;
92}
93
94u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
95{
96 struct cflayer *up;
97 int phyid;
98 struct cfmuxl *muxl = container_obj(layr);
99 spin_lock(&muxl->receive_lock);
100 up = get_up(muxl, channel_id);
101 if (up != NULL)
102 phyid = cfsrvl_getphyid(up);
103 else
104 phyid = 0;
105 spin_unlock(&muxl->receive_lock);
106 return phyid;
107}
108
109int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) 74int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
110{ 75{
111 struct cfmuxl *muxl = (struct cfmuxl *) layr; 76 struct cfmuxl *muxl = (struct cfmuxl *) layr;
@@ -219,12 +184,12 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
219 184
220static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) 185static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
221{ 186{
222 int ret;
223 struct cfmuxl *muxl = container_obj(layr); 187 struct cfmuxl *muxl = container_obj(layr);
224 u8 linkid; 188 u8 linkid;
225 struct cflayer *dn; 189 struct cflayer *dn;
226 struct caif_payload_info *info = cfpkt_info(pkt); 190 struct caif_payload_info *info = cfpkt_info(pkt);
227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 191 BUG_ON(!info);
192 dn = get_dn(muxl, info->dev_info);
228 if (dn == NULL) { 193 if (dn == NULL) {
229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n", 194 pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
230 info->dev_info->id, info->dev_info->id); 195 info->dev_info->id, info->dev_info->id);
@@ -233,11 +198,7 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
233 info->hdr_len += 1; 198 info->hdr_len += 1;
234 linkid = info->channel_id; 199 linkid = info->channel_id;
235 cfpkt_add_head(pkt, &linkid, 1); 200 cfpkt_add_head(pkt, &linkid, 1);
236 ret = dn->transmit(dn, pkt); 201 return dn->transmit(dn, pkt);
237 /* Remove MUX protocol header upon error. */
238 if (ret < 0)
239 cfpkt_extr_head(pkt, &linkid, 1);
240 return ret;
241} 202}
242 203
243static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 204static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index d7e865e2ff65..20c6cb3522e0 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -42,22 +42,22 @@ struct cfpkt_priv_data {
42 bool erronous; 42 bool erronous;
43}; 43};
44 44
45inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) 45static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
46{ 46{
47 return (struct cfpkt_priv_data *) pkt->skb.cb; 47 return (struct cfpkt_priv_data *) pkt->skb.cb;
48} 48}
49 49
50inline bool is_erronous(struct cfpkt *pkt) 50static inline bool is_erronous(struct cfpkt *pkt)
51{ 51{
52 return cfpkt_priv(pkt)->erronous; 52 return cfpkt_priv(pkt)->erronous;
53} 53}
54 54
55inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) 55static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
56{ 56{
57 return &pkt->skb; 57 return &pkt->skb;
58} 58}
59 59
60inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) 60static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
61{ 61{
62 return (struct cfpkt *) skb; 62 return (struct cfpkt *) skb;
63} 63}
@@ -317,17 +317,6 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
317} 317}
318EXPORT_SYMBOL(cfpkt_setlen); 318EXPORT_SYMBOL(cfpkt_setlen);
319 319
320struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
321{
322 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
323 if (!pkt)
324 return NULL;
325 if (unlikely(data != NULL))
326 cfpkt_add_body(pkt, data, len);
327 return pkt;
328}
329EXPORT_SYMBOL(cfpkt_create_uplink);
330
331struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, 320struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
332 struct cfpkt *addpkt, 321 struct cfpkt *addpkt,
333 u16 expectlen) 322 u16 expectlen)
@@ -408,169 +397,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
408} 397}
409EXPORT_SYMBOL(cfpkt_split); 398EXPORT_SYMBOL(cfpkt_split);
410 399
411char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) 400bool cfpkt_erroneous(struct cfpkt *pkt)
412{
413 struct sk_buff *skb = pkt_to_skb(pkt);
414 char *p = buf;
415 int i;
416
417 /*
418 * Sanity check buffer length, it needs to be at least as large as
419 * the header info: ~=50+ bytes
420 */
421 if (buflen < 50)
422 return NULL;
423
424 snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
425 is_erronous(pkt) ? "ERRONOUS-SKB" :
426 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
427 skb,
428 (long) skb->len,
429 (long) (skb_tail_pointer(skb) - skb->data),
430 (long) skb->data_len,
431 (long) (skb->data - skb->head),
432 (long) (skb_tail_pointer(skb) - skb->head));
433 p = buf + strlen(buf);
434
435 for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
436 if (p > buf + buflen - 10) {
437 sprintf(p, "...");
438 p = buf + strlen(buf);
439 break;
440 }
441 sprintf(p, "%02x,", skb->data[i]);
442 p = buf + strlen(buf);
443 }
444 sprintf(p, "]\n");
445 return buf;
446}
447EXPORT_SYMBOL(cfpkt_log_pkt);
448
449int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
450{
451 struct sk_buff *skb = pkt_to_skb(pkt);
452 struct sk_buff *lastskb;
453
454 caif_assert(buf != NULL);
455 if (unlikely(is_erronous(pkt)))
456 return -EPROTO;
457 /* Make sure SKB is writable */
458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
459 PKT_ERROR(pkt, "skb_cow_data failed\n");
460 return -EPROTO;
461 }
462
463 if (unlikely(skb_linearize(skb) != 0)) {
464 PKT_ERROR(pkt, "linearize failed\n");
465 return -EPROTO;
466 }
467
468 if (unlikely(skb_tailroom(skb) < buflen)) {
469 PKT_ERROR(pkt, "buffer too short - failed\n");
470 return -EPROTO;
471 }
472
473 *buf = skb_put(skb, buflen);
474 return 1;
475}
476EXPORT_SYMBOL(cfpkt_raw_append);
477
478int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
479{
480 struct sk_buff *skb = pkt_to_skb(pkt);
481
482 caif_assert(buf != NULL);
483 if (unlikely(is_erronous(pkt)))
484 return -EPROTO;
485
486 if (unlikely(buflen > skb->len)) {
487 PKT_ERROR(pkt, "buflen too large - failed\n");
488 return -EPROTO;
489 }
490
491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO;
495 }
496 }
497
498 *buf = skb->data;
499 skb_pull(skb, buflen);
500
501 return 1;
502}
503EXPORT_SYMBOL(cfpkt_raw_extract);
504
505inline bool cfpkt_erroneous(struct cfpkt *pkt)
506{ 401{
507 return cfpkt_priv(pkt)->erronous; 402 return cfpkt_priv(pkt)->erronous;
508} 403}
509EXPORT_SYMBOL(cfpkt_erroneous); 404EXPORT_SYMBOL(cfpkt_erroneous);
510 405
511struct cfpktq *cfpktq_create(void)
512{
513 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
514 if (!q)
515 return NULL;
516 skb_queue_head_init(&q->head);
517 atomic_set(&q->count, 0);
518 spin_lock_init(&q->lock);
519 return q;
520}
521EXPORT_SYMBOL(cfpktq_create);
522
523void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
524{
525 atomic_inc(&pktq->count);
526 spin_lock(&pktq->lock);
527 skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
528 spin_unlock(&pktq->lock);
529
530}
531EXPORT_SYMBOL(cfpkt_queue);
532
533struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
534{
535 struct cfpkt *tmp;
536 spin_lock(&pktq->lock);
537 tmp = skb_to_pkt(skb_peek(&pktq->head));
538 spin_unlock(&pktq->lock);
539 return tmp;
540}
541EXPORT_SYMBOL(cfpkt_qpeek);
542
543struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
544{
545 struct cfpkt *pkt;
546 spin_lock(&pktq->lock);
547 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
548 if (pkt) {
549 atomic_dec(&pktq->count);
550 caif_assert(atomic_read(&pktq->count) >= 0);
551 }
552 spin_unlock(&pktq->lock);
553 return pkt;
554}
555EXPORT_SYMBOL(cfpkt_dequeue);
556
557int cfpkt_qcount(struct cfpktq *pktq)
558{
559 return atomic_read(&pktq->count);
560}
561EXPORT_SYMBOL(cfpkt_qcount);
562
563struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
564{
565 struct cfpkt *clone;
566 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
567 /* Free original packet. */
568 cfpkt_destroy(pkt);
569 if (!clone)
570 return NULL;
571 return clone;
572}
573EXPORT_SYMBOL(cfpkt_clone_release);
574 406
575struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) 407struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
576{ 408{
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 8303fe3ebf89..2715c84cfa87 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -179,15 +179,10 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) 179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
180{ 180{
181 struct cfserl *layr = container_obj(layer); 181 struct cfserl *layr = container_obj(layer);
182 int ret;
183 u8 tmp8 = CFSERL_STX; 182 u8 tmp8 = CFSERL_STX;
184 if (layr->usestx) 183 if (layr->usestx)
185 cfpkt_add_head(newpkt, &tmp8, 1); 184 cfpkt_add_head(newpkt, &tmp8, 1);
186 ret = layer->dn->transmit(layer->dn, newpkt); 185 return layer->dn->transmit(layer->dn, newpkt);
187 if (ret < 0)
188 cfpkt_extr_head(newpkt, &tmp8, 1);
189
190 return ret;
191} 186}
192 187
193static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 188static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index ab5e542526bf..24ba392f203b 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -151,12 +151,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
151 return -EINVAL; 151 return -EINVAL;
152} 152}
153 153
154void cfservl_destroy(struct cflayer *layer) 154static void cfsrvl_release(struct kref *kref)
155{
156 kfree(layer);
157}
158
159void cfsrvl_release(struct kref *kref)
160{ 155{
161 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref); 156 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
162 kfree(service); 157 kfree(service);
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 315c0d601368..98e027db18ed 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -100,10 +100,5 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
100 */ 100 */
101 info->hdr_len = 1; 101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info; 102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt); 103 return layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0) {
105 u32 tmp32;
106 cfpkt_extr_head(pkt, &tmp32, 4);
107 }
108 return ret;
109} 104}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index c3b1dec4acf6..1a588cd818ea 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -96,8 +96,5 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
96 info->channel_id = service->layer.id; 96 info->channel_id = service->layer.id;
97 info->hdr_len = 1; 97 info->hdr_len = 1;
98 info->dev_info = &service->dev_info; 98 info->dev_info = &service->dev_info;
99 ret = layr->dn->transmit(layr->dn, pkt); 99 return layr->dn->transmit(layr->dn, pkt);
100 if (ret < 0)
101 cfpkt_extr_head(pkt, &tmp, 1);
102 return ret;
103} 100}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index bf6fef2a0eff..b2f5989ad455 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -60,8 +60,5 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
60 info = cfpkt_info(pkt); 60 info = cfpkt_info(pkt);
61 info->channel_id = service->layer.id; 61 info->channel_id = service->layer.id;
62 info->dev_info = &service->dev_info; 62 info->dev_info = &service->dev_info;
63 ret = layr->dn->transmit(layr->dn, pkt); 63 return layr->dn->transmit(layr->dn, pkt);
64 if (ret < 0)
65 cfpkt_extr_head(pkt, &videoheader, 4);
66 return ret;
67} 64}
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 733d66f1b05a..094fc5332d42 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -84,8 +84,8 @@ static DEFINE_SPINLOCK(can_rcvlists_lock);
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
85 85
86/* table of registered CAN protocols */ 86/* table of registered CAN protocols */
87static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 87static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
88static DEFINE_SPINLOCK(proto_tab_lock); 88static DEFINE_MUTEX(proto_tab_lock);
89 89
90struct timer_list can_stattimer; /* timer for statistics update */ 90struct timer_list can_stattimer; /* timer for statistics update */
91struct s_stats can_stats; /* packet statistics */ 91struct s_stats can_stats; /* packet statistics */
@@ -115,11 +115,29 @@ static void can_sock_destruct(struct sock *sk)
115 skb_queue_purge(&sk->sk_receive_queue); 115 skb_queue_purge(&sk->sk_receive_queue);
116} 116}
117 117
118static const struct can_proto *can_get_proto(int protocol)
119{
120 const struct can_proto *cp;
121
122 rcu_read_lock();
123 cp = rcu_dereference(proto_tab[protocol]);
124 if (cp && !try_module_get(cp->prot->owner))
125 cp = NULL;
126 rcu_read_unlock();
127
128 return cp;
129}
130
131static inline void can_put_proto(const struct can_proto *cp)
132{
133 module_put(cp->prot->owner);
134}
135
118static int can_create(struct net *net, struct socket *sock, int protocol, 136static int can_create(struct net *net, struct socket *sock, int protocol,
119 int kern) 137 int kern)
120{ 138{
121 struct sock *sk; 139 struct sock *sk;
122 struct can_proto *cp; 140 const struct can_proto *cp;
123 int err = 0; 141 int err = 0;
124 142
125 sock->state = SS_UNCONNECTED; 143 sock->state = SS_UNCONNECTED;
@@ -130,9 +148,12 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
130 if (!net_eq(net, &init_net)) 148 if (!net_eq(net, &init_net))
131 return -EAFNOSUPPORT; 149 return -EAFNOSUPPORT;
132 150
151 cp = can_get_proto(protocol);
152
133#ifdef CONFIG_MODULES 153#ifdef CONFIG_MODULES
134 /* try to load protocol module kernel is modular */ 154 if (!cp) {
135 if (!proto_tab[protocol]) { 155 /* try to load protocol module if kernel is modular */
156
136 err = request_module("can-proto-%d", protocol); 157 err = request_module("can-proto-%d", protocol);
137 158
138 /* 159 /*
@@ -143,22 +164,18 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
143 if (err && printk_ratelimit()) 164 if (err && printk_ratelimit())
144 printk(KERN_ERR "can: request_module " 165 printk(KERN_ERR "can: request_module "
145 "(can-proto-%d) failed.\n", protocol); 166 "(can-proto-%d) failed.\n", protocol);
167
168 cp = can_get_proto(protocol);
146 } 169 }
147#endif 170#endif
148 171
149 spin_lock(&proto_tab_lock);
150 cp = proto_tab[protocol];
151 if (cp && !try_module_get(cp->prot->owner))
152 cp = NULL;
153 spin_unlock(&proto_tab_lock);
154
155 /* check for available protocol and correct usage */ 172 /* check for available protocol and correct usage */
156 173
157 if (!cp) 174 if (!cp)
158 return -EPROTONOSUPPORT; 175 return -EPROTONOSUPPORT;
159 176
160 if (cp->type != sock->type) { 177 if (cp->type != sock->type) {
161 err = -EPROTONOSUPPORT; 178 err = -EPROTOTYPE;
162 goto errout; 179 goto errout;
163 } 180 }
164 181
@@ -183,7 +200,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
183 } 200 }
184 201
185 errout: 202 errout:
186 module_put(cp->prot->owner); 203 can_put_proto(cp);
187 return err; 204 return err;
188} 205}
189 206
@@ -679,7 +696,7 @@ drop:
679 * -EBUSY protocol already in use 696 * -EBUSY protocol already in use
680 * -ENOBUF if proto_register() fails 697 * -ENOBUF if proto_register() fails
681 */ 698 */
682int can_proto_register(struct can_proto *cp) 699int can_proto_register(const struct can_proto *cp)
683{ 700{
684 int proto = cp->protocol; 701 int proto = cp->protocol;
685 int err = 0; 702 int err = 0;
@@ -694,15 +711,16 @@ int can_proto_register(struct can_proto *cp)
694 if (err < 0) 711 if (err < 0)
695 return err; 712 return err;
696 713
697 spin_lock(&proto_tab_lock); 714 mutex_lock(&proto_tab_lock);
715
698 if (proto_tab[proto]) { 716 if (proto_tab[proto]) {
699 printk(KERN_ERR "can: protocol %d already registered\n", 717 printk(KERN_ERR "can: protocol %d already registered\n",
700 proto); 718 proto);
701 err = -EBUSY; 719 err = -EBUSY;
702 } else 720 } else
703 proto_tab[proto] = cp; 721 rcu_assign_pointer(proto_tab[proto], cp);
704 722
705 spin_unlock(&proto_tab_lock); 723 mutex_unlock(&proto_tab_lock);
706 724
707 if (err < 0) 725 if (err < 0)
708 proto_unregister(cp->prot); 726 proto_unregister(cp->prot);
@@ -715,17 +733,16 @@ EXPORT_SYMBOL(can_proto_register);
715 * can_proto_unregister - unregister CAN transport protocol 733 * can_proto_unregister - unregister CAN transport protocol
716 * @cp: pointer to CAN protocol structure 734 * @cp: pointer to CAN protocol structure
717 */ 735 */
718void can_proto_unregister(struct can_proto *cp) 736void can_proto_unregister(const struct can_proto *cp)
719{ 737{
720 int proto = cp->protocol; 738 int proto = cp->protocol;
721 739
722 spin_lock(&proto_tab_lock); 740 mutex_lock(&proto_tab_lock);
723 if (!proto_tab[proto]) { 741 BUG_ON(proto_tab[proto] != cp);
724 printk(KERN_ERR "BUG: can: protocol %d is not registered\n", 742 rcu_assign_pointer(proto_tab[proto], NULL);
725 proto); 743 mutex_unlock(&proto_tab_lock);
726 } 744
727 proto_tab[proto] = NULL; 745 synchronize_rcu();
728 spin_unlock(&proto_tab_lock);
729 746
730 proto_unregister(cp->prot); 747 proto_unregister(cp->prot);
731} 748}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8a6a05e7c3c8..cced806098a9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1601,7 +1601,7 @@ static struct proto bcm_proto __read_mostly = {
1601 .init = bcm_init, 1601 .init = bcm_init,
1602}; 1602};
1603 1603
1604static struct can_proto bcm_can_proto __read_mostly = { 1604static const struct can_proto bcm_can_proto = {
1605 .type = SOCK_DGRAM, 1605 .type = SOCK_DGRAM,
1606 .protocol = CAN_BCM, 1606 .protocol = CAN_BCM,
1607 .ops = &bcm_ops, 1607 .ops = &bcm_ops,
diff --git a/net/can/raw.c b/net/can/raw.c
index 0eb39a7fdf64..dea99a6e596c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -774,7 +774,7 @@ static struct proto raw_proto __read_mostly = {
774 .init = raw_init, 774 .init = raw_init,
775}; 775};
776 776
777static struct can_proto raw_can_proto __read_mostly = { 777static const struct can_proto raw_can_proto = {
778 .type = SOCK_RAW, 778 .type = SOCK_RAW,
779 .protocol = CAN_RAW, 779 .protocol = CAN_RAW,
780 .ops = &raw_ops, 780 .ops = &raw_ops,
diff --git a/net/compat.c b/net/compat.c
index 3649d5895361..c578d9382e19 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -722,11 +722,11 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
722 722
723/* Argument list sizes for compat_sys_socketcall */ 723/* Argument list sizes for compat_sys_socketcall */
724#define AL(x) ((x) * sizeof(u32)) 724#define AL(x) ((x) * sizeof(u32))
725static unsigned char nas[20] = { 725static unsigned char nas[21] = {
726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
729 AL(4), AL(5) 729 AL(4), AL(5), AL(4)
730}; 730};
731#undef AL 731#undef AL
732 732
@@ -735,6 +735,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, uns
735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
736} 736}
737 737
738asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
739 unsigned vlen, unsigned int flags)
740{
741 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
742 flags | MSG_CMSG_COMPAT);
743}
744
738asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) 745asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
739{ 746{
740 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 747 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
@@ -780,7 +787,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
780 u32 a[6]; 787 u32 a[6];
781 u32 a0, a1; 788 u32 a0, a1;
782 789
783 if (call < SYS_SOCKET || call > SYS_RECVMMSG) 790 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
784 return -EINVAL; 791 return -EINVAL;
785 if (copy_from_user(a, args, nas[call])) 792 if (copy_from_user(a, args, nas[call]))
786 return -EFAULT; 793 return -EFAULT;
@@ -839,6 +846,9 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
839 case SYS_SENDMSG: 846 case SYS_SENDMSG:
840 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); 847 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
841 break; 848 break;
849 case SYS_SENDMMSG:
850 ret = compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]);
851 break;
842 case SYS_RECVMSG: 852 case SYS_RECVMSG:
843 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); 853 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
844 break; 854 break;
diff --git a/net/core/dev.c b/net/core/dev.c
index 856b6ee9a1d5..44ef8f8998ca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -948,7 +948,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
948} 948}
949EXPORT_SYMBOL(dev_alloc_name); 949EXPORT_SYMBOL(dev_alloc_name);
950 950
951static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 951static int dev_get_valid_name(struct net_device *dev, const char *name)
952{ 952{
953 struct net *net; 953 struct net *net;
954 954
@@ -958,7 +958,7 @@ static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt
958 if (!dev_valid_name(name)) 958 if (!dev_valid_name(name))
959 return -EINVAL; 959 return -EINVAL;
960 960
961 if (fmt && strchr(name, '%')) 961 if (strchr(name, '%'))
962 return dev_alloc_name(dev, name); 962 return dev_alloc_name(dev, name);
963 else if (__dev_get_by_name(net, name)) 963 else if (__dev_get_by_name(net, name))
964 return -EEXIST; 964 return -EEXIST;
@@ -995,7 +995,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
995 995
996 memcpy(oldname, dev->name, IFNAMSIZ); 996 memcpy(oldname, dev->name, IFNAMSIZ);
997 997
998 err = dev_get_valid_name(dev, newname, 1); 998 err = dev_get_valid_name(dev, newname);
999 if (err < 0) 999 if (err < 0)
1000 return err; 1000 return err;
1001 1001
@@ -1315,7 +1315,8 @@ void dev_disable_lro(struct net_device *dev)
1315 return; 1315 return;
1316 1316
1317 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); 1317 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
1318 WARN_ON(dev->features & NETIF_F_LRO); 1318 if (unlikely(dev->features & NETIF_F_LRO))
1319 netdev_WARN(dev, "failed to disable LRO!\n");
1319} 1320}
1320EXPORT_SYMBOL(dev_disable_lro); 1321EXPORT_SYMBOL(dev_disable_lro);
1321 1322
@@ -2502,8 +2503,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2502__u32 __skb_get_rxhash(struct sk_buff *skb) 2503__u32 __skb_get_rxhash(struct sk_buff *skb)
2503{ 2504{
2504 int nhoff, hash = 0, poff; 2505 int nhoff, hash = 0, poff;
2505 struct ipv6hdr *ip6; 2506 const struct ipv6hdr *ip6;
2506 struct iphdr *ip; 2507 const struct iphdr *ip;
2507 u8 ip_proto; 2508 u8 ip_proto;
2508 u32 addr1, addr2, ihl; 2509 u32 addr1, addr2, ihl;
2509 union { 2510 union {
@@ -2518,7 +2519,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2518 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2519 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2519 goto done; 2520 goto done;
2520 2521
2521 ip = (struct iphdr *) (skb->data + nhoff); 2522 ip = (const struct iphdr *) (skb->data + nhoff);
2522 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2523 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2523 ip_proto = 0; 2524 ip_proto = 0;
2524 else 2525 else
@@ -2531,7 +2532,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2531 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2532 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2532 goto done; 2533 goto done;
2533 2534
2534 ip6 = (struct ipv6hdr *) (skb->data + nhoff); 2535 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2535 ip_proto = ip6->nexthdr; 2536 ip_proto = ip6->nexthdr;
2536 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2537 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2537 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2538 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -3076,25 +3077,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3076} 3077}
3077EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3078EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3078 3079
3079static void vlan_on_bond_hook(struct sk_buff *skb)
3080{
3081 /*
3082 * Make sure ARP frames received on VLAN interfaces stacked on
3083 * bonding interfaces still make their way to any base bonding
3084 * device that may have registered for a specific ptype.
3085 */
3086 if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
3087 vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
3088 skb->protocol == htons(ETH_P_ARP)) {
3089 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3090
3091 if (!skb2)
3092 return;
3093 skb2->dev = vlan_dev_real_dev(skb->dev);
3094 netif_rx(skb2);
3095 }
3096}
3097
3098static int __netif_receive_skb(struct sk_buff *skb) 3080static int __netif_receive_skb(struct sk_buff *skb)
3099{ 3081{
3100 struct packet_type *ptype, *pt_prev; 3082 struct packet_type *ptype, *pt_prev;
@@ -3130,6 +3112,12 @@ another_round:
3130 3112
3131 __this_cpu_inc(softnet_data.processed); 3113 __this_cpu_inc(softnet_data.processed);
3132 3114
3115 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3116 skb = vlan_untag(skb);
3117 if (unlikely(!skb))
3118 goto out;
3119 }
3120
3133#ifdef CONFIG_NET_CLS_ACT 3121#ifdef CONFIG_NET_CLS_ACT
3134 if (skb->tc_verd & TC_NCLS) { 3122 if (skb->tc_verd & TC_NCLS) {
3135 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3123 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3177,15 +3165,13 @@ ncls:
3177 ret = deliver_skb(skb, pt_prev, orig_dev); 3165 ret = deliver_skb(skb, pt_prev, orig_dev);
3178 pt_prev = NULL; 3166 pt_prev = NULL;
3179 } 3167 }
3180 if (vlan_hwaccel_do_receive(&skb)) { 3168 if (vlan_do_receive(&skb)) {
3181 ret = __netif_receive_skb(skb); 3169 ret = __netif_receive_skb(skb);
3182 goto out; 3170 goto out;
3183 } else if (unlikely(!skb)) 3171 } else if (unlikely(!skb))
3184 goto out; 3172 goto out;
3185 } 3173 }
3186 3174
3187 vlan_on_bond_hook(skb);
3188
3189 /* deliver only exact match when indicated */ 3175 /* deliver only exact match when indicated */
3190 null_or_dev = deliver_exact ? skb->dev : NULL; 3176 null_or_dev = deliver_exact ? skb->dev : NULL;
3191 3177
@@ -4510,6 +4496,30 @@ void dev_set_rx_mode(struct net_device *dev)
4510} 4496}
4511 4497
4512/** 4498/**
4499 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4500 * @dev: device
4501 * @cmd: memory area for ethtool_ops::get_settings() result
4502 *
4503 * The cmd arg is initialized properly (cleared and
4504 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4505 *
4506 * Return device's ethtool_ops::get_settings() result value or
4507 * -EOPNOTSUPP when device doesn't expose
4508 * ethtool_ops::get_settings() operation.
4509 */
4510int dev_ethtool_get_settings(struct net_device *dev,
4511 struct ethtool_cmd *cmd)
4512{
4513 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4514 return -EOPNOTSUPP;
4515
4516 memset(cmd, 0, sizeof(struct ethtool_cmd));
4517 cmd->cmd = ETHTOOL_GSET;
4518 return dev->ethtool_ops->get_settings(dev, cmd);
4519}
4520EXPORT_SYMBOL(dev_ethtool_get_settings);
4521
4522/**
4513 * dev_get_flags - get flags reported to userspace 4523 * dev_get_flags - get flags reported to userspace
4514 * @dev: device 4524 * @dev: device
4515 * 4525 *
@@ -5240,11 +5250,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5240} 5250}
5241EXPORT_SYMBOL(netdev_fix_features); 5251EXPORT_SYMBOL(netdev_fix_features);
5242 5252
5243void netdev_update_features(struct net_device *dev) 5253int __netdev_update_features(struct net_device *dev)
5244{ 5254{
5245 u32 features; 5255 u32 features;
5246 int err = 0; 5256 int err = 0;
5247 5257
5258 ASSERT_RTNL();
5259
5248 features = netdev_get_wanted_features(dev); 5260 features = netdev_get_wanted_features(dev);
5249 5261
5250 if (dev->netdev_ops->ndo_fix_features) 5262 if (dev->netdev_ops->ndo_fix_features)
@@ -5254,7 +5266,7 @@ void netdev_update_features(struct net_device *dev)
5254 features = netdev_fix_features(dev, features); 5266 features = netdev_fix_features(dev, features);
5255 5267
5256 if (dev->features == features) 5268 if (dev->features == features)
5257 return; 5269 return 0;
5258 5270
5259 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n", 5271 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
5260 dev->features, features); 5272 dev->features, features);
@@ -5262,12 +5274,23 @@ void netdev_update_features(struct net_device *dev)
5262 if (dev->netdev_ops->ndo_set_features) 5274 if (dev->netdev_ops->ndo_set_features)
5263 err = dev->netdev_ops->ndo_set_features(dev, features); 5275 err = dev->netdev_ops->ndo_set_features(dev, features);
5264 5276
5265 if (!err) 5277 if (unlikely(err < 0)) {
5266 dev->features = features;
5267 else if (err < 0)
5268 netdev_err(dev, 5278 netdev_err(dev,
5269 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", 5279 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5270 err, features, dev->features); 5280 err, features, dev->features);
5281 return -1;
5282 }
5283
5284 if (!err)
5285 dev->features = features;
5286
5287 return 1;
5288}
5289
5290void netdev_update_features(struct net_device *dev)
5291{
5292 if (__netdev_update_features(dev))
5293 netdev_features_change(dev);
5271} 5294}
5272EXPORT_SYMBOL(netdev_update_features); 5295EXPORT_SYMBOL(netdev_update_features);
5273 5296
@@ -5397,8 +5420,8 @@ int register_netdevice(struct net_device *dev)
5397 } 5420 }
5398 } 5421 }
5399 5422
5400 ret = dev_get_valid_name(dev, dev->name, 0); 5423 ret = dev_get_valid_name(dev, dev->name);
5401 if (ret) 5424 if (ret < 0)
5402 goto err_uninit; 5425 goto err_uninit;
5403 5426
5404 dev->ifindex = dev_new_index(net); 5427 dev->ifindex = dev_new_index(net);
@@ -5418,6 +5441,14 @@ int register_netdevice(struct net_device *dev)
5418 dev->features &= ~NETIF_F_GSO; 5441 dev->features &= ~NETIF_F_GSO;
5419 } 5442 }
5420 5443
5444 /* Turn on no cache copy if HW is doing checksum */
5445 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5446 if ((dev->features & NETIF_F_ALL_CSUM) &&
5447 !(dev->features & NETIF_F_NO_CSUM)) {
5448 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5449 dev->features |= NETIF_F_NOCACHE_COPY;
5450 }
5451
5421 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5452 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5422 * vlan_dev_init() will do the dev->features check, so these features 5453 * vlan_dev_init() will do the dev->features check, so these features
5423 * are enabled only if supported by underlying device. 5454 * are enabled only if supported by underlying device.
@@ -5434,7 +5465,7 @@ int register_netdevice(struct net_device *dev)
5434 goto err_uninit; 5465 goto err_uninit;
5435 dev->reg_state = NETREG_REGISTERED; 5466 dev->reg_state = NETREG_REGISTERED;
5436 5467
5437 netdev_update_features(dev); 5468 __netdev_update_features(dev);
5438 5469
5439 /* 5470 /*
5440 * Default initial state at registry is that the 5471 * Default initial state at registry is that the
@@ -5531,19 +5562,7 @@ int register_netdev(struct net_device *dev)
5531 int err; 5562 int err;
5532 5563
5533 rtnl_lock(); 5564 rtnl_lock();
5534
5535 /*
5536 * If the name is a format string the caller wants us to do a
5537 * name allocation.
5538 */
5539 if (strchr(dev->name, '%')) {
5540 err = dev_alloc_name(dev, dev->name);
5541 if (err < 0)
5542 goto out;
5543 }
5544
5545 err = register_netdevice(dev); 5565 err = register_netdevice(dev);
5546out:
5547 rtnl_unlock(); 5566 rtnl_unlock();
5548 return err; 5567 return err;
5549} 5568}
@@ -6025,7 +6044,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6025 /* We get here if we can't use the current device name */ 6044 /* We get here if we can't use the current device name */
6026 if (!pat) 6045 if (!pat)
6027 goto out; 6046 goto out;
6028 if (dev_get_valid_name(dev, pat, 1)) 6047 if (dev_get_valid_name(dev, pat) < 0)
6029 goto out; 6048 goto out;
6030 } 6049 }
6031 6050
@@ -6157,29 +6176,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6157 */ 6176 */
6158u32 netdev_increment_features(u32 all, u32 one, u32 mask) 6177u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6159{ 6178{
6160 /* If device needs checksumming, downgrade to it. */ 6179 if (mask & NETIF_F_GEN_CSUM)
6161 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) 6180 mask |= NETIF_F_ALL_CSUM;
6162 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); 6181 mask |= NETIF_F_VLAN_CHALLENGED;
6163 else if (mask & NETIF_F_ALL_CSUM) {
6164 /* If one device supports v4/v6 checksumming, set for all. */
6165 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6166 !(all & NETIF_F_GEN_CSUM)) {
6167 all &= ~NETIF_F_ALL_CSUM;
6168 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6169 }
6170 6182
6171 /* If one device supports hw checksumming, set for all. */ 6183 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6172 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { 6184 all &= one | ~NETIF_F_ALL_FOR_ALL;
6173 all &= ~NETIF_F_ALL_CSUM;
6174 all |= NETIF_F_HW_CSUM;
6175 }
6176 }
6177 6185
6178 one |= NETIF_F_ALL_CSUM; 6186 /* If device needs checksumming, downgrade to it. */
6187 if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
6188 all &= ~NETIF_F_NO_CSUM;
6179 6189
6180 one |= all & NETIF_F_ONE_FOR_ALL; 6190 /* If one device supports hw checksumming, set for all. */
6181 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; 6191 if (all & NETIF_F_GEN_CSUM)
6182 all |= one & mask & NETIF_F_ONE_FOR_ALL; 6192 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6183 6193
6184 return all; 6194 return all;
6185} 6195}
diff --git a/net/core/dst.c b/net/core/dst.c
index 91104d35de7d..30f009327b62 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -166,7 +166,8 @@ EXPORT_SYMBOL(dst_discard);
166 166
167const u32 dst_default_metrics[RTAX_MAX]; 167const u32 dst_default_metrics[RTAX_MAX];
168 168
169void *dst_alloc(struct dst_ops *ops, int initial_ref) 169void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
170 int initial_ref, int initial_obsolete, int flags)
170{ 171{
171 struct dst_entry *dst; 172 struct dst_entry *dst;
172 173
@@ -174,15 +175,36 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
174 if (ops->gc(ops)) 175 if (ops->gc(ops))
175 return NULL; 176 return NULL;
176 } 177 }
177 dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC); 178 dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
178 if (!dst) 179 if (!dst)
179 return NULL; 180 return NULL;
180 atomic_set(&dst->__refcnt, initial_ref); 181 dst->child = NULL;
182 dst->dev = dev;
183 if (dev)
184 dev_hold(dev);
181 dst->ops = ops; 185 dst->ops = ops;
182 dst->lastuse = jiffies;
183 dst->path = dst;
184 dst->input = dst->output = dst_discard;
185 dst_init_metrics(dst, dst_default_metrics, true); 186 dst_init_metrics(dst, dst_default_metrics, true);
187 dst->expires = 0UL;
188 dst->path = dst;
189 dst->neighbour = NULL;
190 dst->hh = NULL;
191#ifdef CONFIG_XFRM
192 dst->xfrm = NULL;
193#endif
194 dst->input = dst_discard;
195 dst->output = dst_discard;
196 dst->error = 0;
197 dst->obsolete = initial_obsolete;
198 dst->header_len = 0;
199 dst->trailer_len = 0;
200#ifdef CONFIG_IP_ROUTE_CLASSID
201 dst->tclassid = 0;
202#endif
203 atomic_set(&dst->__refcnt, initial_ref);
204 dst->__use = 0;
205 dst->lastuse = jiffies;
206 dst->flags = flags;
207 dst->next = NULL;
186#if RT_CACHE_DEBUG >= 2 208#if RT_CACHE_DEBUG >= 2
187 atomic_inc(&dst_total); 209 atomic_inc(&dst_total);
188#endif 210#endif
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..d8b1a8d85a96 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/rtnetlink.h>
25#include <linux/sched.h>
24 26
25/* 27/*
26 * Some useful ethtool_ops methods that're device independent. 28 * Some useful ethtool_ops methods that're device independent.
@@ -317,7 +319,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
317 319
318 dev->wanted_features &= ~features[0].valid; 320 dev->wanted_features &= ~features[0].valid;
319 dev->wanted_features |= features[0].valid & features[0].requested; 321 dev->wanted_features |= features[0].valid & features[0].requested;
320 netdev_update_features(dev); 322 __netdev_update_features(dev);
321 323
322 if ((dev->wanted_features ^ dev->features) & features[0].valid) 324 if ((dev->wanted_features ^ dev->features) & features[0].valid)
323 ret |= ETHTOOL_F_WISH; 325 ret |= ETHTOOL_F_WISH;
@@ -359,7 +361,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
359 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 361 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
360 /* NETIF_F_RXHASH */ "rx-hashing", 362 /* NETIF_F_RXHASH */ "rx-hashing",
361 /* NETIF_F_RXCSUM */ "rx-checksum", 363 /* NETIF_F_RXCSUM */ "rx-checksum",
362 "", 364 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy"
363 "", 365 "",
364}; 366};
365 367
@@ -499,7 +501,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
499 else 501 else
500 dev->wanted_features &= ~mask; 502 dev->wanted_features &= ~mask;
501 503
502 netdev_update_features(dev); 504 __netdev_update_features(dev);
503 return 0; 505 return 0;
504 } 506 }
505 507
@@ -544,14 +546,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
544 } 546 }
545 547
546 /* allow changing only bits set in hw_features */ 548 /* allow changing only bits set in hw_features */
547 changed = (data ^ dev->wanted_features) & flags_dup_features; 549 changed = (data ^ dev->features) & flags_dup_features;
548 if (changed & ~dev->hw_features) 550 if (changed & ~dev->hw_features)
549 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 551 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
550 552
551 dev->wanted_features = 553 dev->wanted_features =
552 (dev->wanted_features & ~changed) | data; 554 (dev->wanted_features & ~changed) | (data & dev->hw_features);
553 555
554 netdev_update_features(dev); 556 __netdev_update_features(dev);
555 557
556 return 0; 558 return 0;
557} 559}
@@ -908,6 +910,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
908 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; 910 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
909 int ret; 911 int ret;
910 912
913 if (!ops->set_rx_ntuple)
914 return -EOPNOTSUPP;
915
911 if (!(dev->features & NETIF_F_NTUPLE)) 916 if (!(dev->features & NETIF_F_NTUPLE))
912 return -EINVAL; 917 return -EINVAL;
913 918
@@ -1441,6 +1446,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1441 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1446 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1442} 1447}
1443 1448
1449static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1450 void __user *useraddr)
1451{
1452 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1453
1454 if (!dev->ethtool_ops->get_channels)
1455 return -EOPNOTSUPP;
1456
1457 dev->ethtool_ops->get_channels(dev, &channels);
1458
1459 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1460 return -EFAULT;
1461 return 0;
1462}
1463
1464static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1465 void __user *useraddr)
1466{
1467 struct ethtool_channels channels;
1468
1469 if (!dev->ethtool_ops->set_channels)
1470 return -EOPNOTSUPP;
1471
1472 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1473 return -EFAULT;
1474
1475 return dev->ethtool_ops->set_channels(dev, &channels);
1476}
1477
1444static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1478static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1445{ 1479{
1446 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1480 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
@@ -1618,14 +1652,64 @@ out:
1618static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 1652static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1619{ 1653{
1620 struct ethtool_value id; 1654 struct ethtool_value id;
1655 static bool busy;
1656 int rc;
1621 1657
1622 if (!dev->ethtool_ops->phys_id) 1658 if (!dev->ethtool_ops->set_phys_id && !dev->ethtool_ops->phys_id)
1623 return -EOPNOTSUPP; 1659 return -EOPNOTSUPP;
1624 1660
1661 if (busy)
1662 return -EBUSY;
1663
1625 if (copy_from_user(&id, useraddr, sizeof(id))) 1664 if (copy_from_user(&id, useraddr, sizeof(id)))
1626 return -EFAULT; 1665 return -EFAULT;
1627 1666
1628 return dev->ethtool_ops->phys_id(dev, id.data); 1667 if (!dev->ethtool_ops->set_phys_id)
1668 /* Do it the old way */
1669 return dev->ethtool_ops->phys_id(dev, id.data);
1670
1671 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1672 if (rc < 0)
1673 return rc;
1674
1675 /* Drop the RTNL lock while waiting, but prevent reentry or
1676 * removal of the device.
1677 */
1678 busy = true;
1679 dev_hold(dev);
1680 rtnl_unlock();
1681
1682 if (rc == 0) {
1683 /* Driver will handle this itself */
1684 schedule_timeout_interruptible(
1685 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1686 } else {
1687 /* Driver expects to be called at twice the frequency in rc */
1688 int n = rc * 2, i, interval = HZ / n;
1689
1690 /* Count down seconds */
1691 do {
1692 /* Count down iterations per second */
1693 i = n;
1694 do {
1695 rtnl_lock();
1696 rc = dev->ethtool_ops->set_phys_id(dev,
1697 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1698 rtnl_unlock();
1699 if (rc)
1700 break;
1701 schedule_timeout_interruptible(interval);
1702 } while (!signal_pending(current) && --i != 0);
1703 } while (!signal_pending(current) &&
1704 (id.data == 0 || --id.data != 0));
1705 }
1706
1707 rtnl_lock();
1708 dev_put(dev);
1709 busy = false;
1710
1711 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1712 return rc;
1629} 1713}
1630 1714
1631static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1715static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
@@ -1953,6 +2037,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1953 case ETHTOOL_SGRO: 2037 case ETHTOOL_SGRO:
1954 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 2038 rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
1955 break; 2039 break;
2040 case ETHTOOL_GCHANNELS:
2041 rc = ethtool_get_channels(dev, useraddr);
2042 break;
2043 case ETHTOOL_SCHANNELS:
2044 rc = ethtool_set_channels(dev, useraddr);
2045 break;
1956 default: 2046 default:
1957 rc = -EOPNOTSUPP; 2047 rc = -EOPNOTSUPP;
1958 } 2048 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8248ebb5891d..3911586e12e4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -590,7 +590,8 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
590 int idx = 0; 590 int idx = 0;
591 struct fib_rule *rule; 591 struct fib_rule *rule;
592 592
593 list_for_each_entry(rule, &ops->rules_list, list) { 593 rcu_read_lock();
594 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
594 if (idx < cb->args[1]) 595 if (idx < cb->args[1])
595 goto skip; 596 goto skip;
596 597
diff --git a/net/core/filter.c b/net/core/filter.c
index afb8afb066bb..0eb8c4466eaa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,65 +39,6 @@
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 40#include <linux/reciprocal_div.h>
41 41
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88 /* Ancillary data */
89 BPF_S_ANC_PROTOCOL,
90 BPF_S_ANC_PKTTYPE,
91 BPF_S_ANC_IFINDEX,
92 BPF_S_ANC_NLATTR,
93 BPF_S_ANC_NLATTR_NEST,
94 BPF_S_ANC_MARK,
95 BPF_S_ANC_QUEUE,
96 BPF_S_ANC_HATYPE,
97 BPF_S_ANC_RXHASH,
98 BPF_S_ANC_CPU,
99};
100
101/* No hurry in this branch */ 42/* No hurry in this branch */
102static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 43static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
103{ 44{
@@ -145,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
145 rcu_read_lock(); 86 rcu_read_lock();
146 filter = rcu_dereference(sk->sk_filter); 87 filter = rcu_dereference(sk->sk_filter);
147 if (filter) { 88 if (filter) {
148 unsigned int pkt_len = sk_run_filter(skb, filter->insns); 89 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
149 90
150 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 91 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
151 } 92 }
@@ -638,6 +579,7 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
638{ 579{
639 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 580 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
640 581
582 bpf_jit_free(fp);
641 kfree(fp); 583 kfree(fp);
642} 584}
643EXPORT_SYMBOL(sk_filter_release_rcu); 585EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -672,6 +614,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
672 614
673 atomic_set(&fp->refcnt, 1); 615 atomic_set(&fp->refcnt, 1);
674 fp->len = fprog->len; 616 fp->len = fprog->len;
617 fp->bpf_func = sk_run_filter;
675 618
676 err = sk_chk_filter(fp->insns, fp->len); 619 err = sk_chk_filter(fp->insns, fp->len);
677 if (err) { 620 if (err) {
@@ -679,6 +622,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
679 return err; 622 return err;
680 } 623 }
681 624
625 bpf_jit_compile(fp);
626
682 old_fp = rcu_dereference_protected(sk->sk_filter, 627 old_fp = rcu_dereference_protected(sk->sk_filter,
683 sock_owned_by_user(sk)); 628 sock_owned_by_user(sk));
684 rcu_assign_pointer(sk->sk_filter, fp); 629 rcu_assign_pointer(sk->sk_filter, fp);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 5ceb257e860c..381813eae46c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -28,6 +28,7 @@
28static const char fmt_hex[] = "%#x\n"; 28static const char fmt_hex[] = "%#x\n";
29static const char fmt_long_hex[] = "%#lx\n"; 29static const char fmt_long_hex[] = "%#lx\n";
30static const char fmt_dec[] = "%d\n"; 30static const char fmt_dec[] = "%d\n";
31static const char fmt_udec[] = "%u\n";
31static const char fmt_ulong[] = "%lu\n"; 32static const char fmt_ulong[] = "%lu\n";
32static const char fmt_u64[] = "%llu\n"; 33static const char fmt_u64[] = "%llu\n";
33 34
@@ -145,13 +146,10 @@ static ssize_t show_speed(struct device *dev,
145 if (!rtnl_trylock()) 146 if (!rtnl_trylock())
146 return restart_syscall(); 147 return restart_syscall();
147 148
148 if (netif_running(netdev) && 149 if (netif_running(netdev)) {
149 netdev->ethtool_ops && 150 struct ethtool_cmd cmd;
150 netdev->ethtool_ops->get_settings) { 151 if (!dev_ethtool_get_settings(netdev, &cmd))
151 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 152 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
152
153 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
154 ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
155 } 153 }
156 rtnl_unlock(); 154 rtnl_unlock();
157 return ret; 155 return ret;
@@ -166,13 +164,11 @@ static ssize_t show_duplex(struct device *dev,
166 if (!rtnl_trylock()) 164 if (!rtnl_trylock())
167 return restart_syscall(); 165 return restart_syscall();
168 166
169 if (netif_running(netdev) && 167 if (netif_running(netdev)) {
170 netdev->ethtool_ops && 168 struct ethtool_cmd cmd;
171 netdev->ethtool_ops->get_settings) { 169 if (!dev_ethtool_get_settings(netdev, &cmd))
172 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 170 ret = sprintf(buf, "%s\n",
173 171 cmd.duplex ? "full" : "half");
174 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
175 ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
176 } 172 }
177 rtnl_unlock(); 173 rtnl_unlock();
178 return ret; 174 return ret;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f860261c5ee..1abb50841046 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -216,11 +216,14 @@ static void net_free(struct net *net)
216 kmem_cache_free(net_cachep, net); 216 kmem_cache_free(net_cachep, net);
217} 217}
218 218
219static struct net *net_create(void) 219struct net *copy_net_ns(unsigned long flags, struct net *old_net)
220{ 220{
221 struct net *net; 221 struct net *net;
222 int rv; 222 int rv;
223 223
224 if (!(flags & CLONE_NEWNET))
225 return get_net(old_net);
226
224 net = net_alloc(); 227 net = net_alloc();
225 if (!net) 228 if (!net)
226 return ERR_PTR(-ENOMEM); 229 return ERR_PTR(-ENOMEM);
@@ -239,13 +242,6 @@ static struct net *net_create(void)
239 return net; 242 return net;
240} 243}
241 244
242struct net *copy_net_ns(unsigned long flags, struct net *old_net)
243{
244 if (!(flags & CLONE_NEWNET))
245 return get_net(old_net);
246 return net_create();
247}
248
249static DEFINE_SPINLOCK(cleanup_list_lock); 245static DEFINE_SPINLOCK(cleanup_list_lock);
250static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 246static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
251 247
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..46d9c3a4de2f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
539{ 539{
540 int proto, len, ulen; 540 int proto, len, ulen;
541 int hits = 0; 541 int hits = 0;
542 struct iphdr *iph; 542 const struct iphdr *iph;
543 struct udphdr *uh; 543 struct udphdr *uh;
544 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
545 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aeeece72b72f..ff79d94b5944 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2514,7 +2514,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2514{ 2514{
2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2516 int err = 0; 2516 int err = 0;
2517 struct iphdr *iph;
2518 2517
2519 if (!x) 2518 if (!x)
2520 return 0; 2519 return 0;
@@ -2524,7 +2523,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2524 return 0; 2523 return 0;
2525 2524
2526 spin_lock(&x->lock); 2525 spin_lock(&x->lock);
2527 iph = ip_hdr(skb);
2528 2526
2529 err = x->outer_mode->output(x, skb); 2527 err = x->outer_mode->output(x, skb);
2530 if (err) 2528 if (err)
@@ -2624,6 +2622,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2624 } else { 2622 } else {
2625 int frags = pkt_dev->nfrags; 2623 int frags = pkt_dev->nfrags;
2626 int i, len; 2624 int i, len;
2625 int frag_len;
2627 2626
2628 2627
2629 if (frags > MAX_SKB_FRAGS) 2628 if (frags > MAX_SKB_FRAGS)
@@ -2635,6 +2634,8 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2635 } 2634 }
2636 2635
2637 i = 0; 2636 i = 0;
2637 frag_len = (datalen/frags) < PAGE_SIZE ?
2638 (datalen/frags) : PAGE_SIZE;
2638 while (datalen > 0) { 2639 while (datalen > 0) {
2639 if (unlikely(!pkt_dev->page)) { 2640 if (unlikely(!pkt_dev->page)) {
2640 int node = numa_node_id(); 2641 int node = numa_node_id();
@@ -2648,38 +2649,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2648 skb_shinfo(skb)->frags[i].page = pkt_dev->page; 2649 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2649 get_page(pkt_dev->page); 2650 get_page(pkt_dev->page);
2650 skb_shinfo(skb)->frags[i].page_offset = 0; 2651 skb_shinfo(skb)->frags[i].page_offset = 0;
2651 skb_shinfo(skb)->frags[i].size = 2652 /*last fragment, fill rest of data*/
2652 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2653 if (i == (frags - 1))
2654 skb_shinfo(skb)->frags[i].size =
2655 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
2656 else
2657 skb_shinfo(skb)->frags[i].size = frag_len;
2653 datalen -= skb_shinfo(skb)->frags[i].size; 2658 datalen -= skb_shinfo(skb)->frags[i].size;
2654 skb->len += skb_shinfo(skb)->frags[i].size; 2659 skb->len += skb_shinfo(skb)->frags[i].size;
2655 skb->data_len += skb_shinfo(skb)->frags[i].size; 2660 skb->data_len += skb_shinfo(skb)->frags[i].size;
2656 i++; 2661 i++;
2657 skb_shinfo(skb)->nr_frags = i; 2662 skb_shinfo(skb)->nr_frags = i;
2658 } 2663 }
2659
2660 while (i < frags) {
2661 int rem;
2662
2663 if (i == 0)
2664 break;
2665
2666 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
2667 if (rem == 0)
2668 break;
2669
2670 skb_shinfo(skb)->frags[i - 1].size -= rem;
2671
2672 skb_shinfo(skb)->frags[i] =
2673 skb_shinfo(skb)->frags[i - 1];
2674 get_page(skb_shinfo(skb)->frags[i].page);
2675 skb_shinfo(skb)->frags[i].page =
2676 skb_shinfo(skb)->frags[i - 1].page;
2677 skb_shinfo(skb)->frags[i].page_offset +=
2678 skb_shinfo(skb)->frags[i - 1].size;
2679 skb_shinfo(skb)->frags[i].size = rem;
2680 i++;
2681 skb_shinfo(skb)->nr_frags = i;
2682 }
2683 } 2664 }
2684 2665
2685 /* Stamp the time, and sequence number, 2666 /* Stamp the time, and sequence number,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d7c4bb4b1820..5a160f4a1ba0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1007,10 +1007,11 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1007 s_h = cb->args[0]; 1007 s_h = cb->args[0];
1008 s_idx = cb->args[1]; 1008 s_idx = cb->args[1];
1009 1009
1010 rcu_read_lock();
1010 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1011 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1011 idx = 0; 1012 idx = 0;
1012 head = &net->dev_index_head[h]; 1013 head = &net->dev_index_head[h];
1013 hlist_for_each_entry(dev, node, head, index_hlist) { 1014 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1014 if (idx < s_idx) 1015 if (idx < s_idx)
1015 goto cont; 1016 goto cont;
1016 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1017 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1023,6 +1024,7 @@ cont:
1023 } 1024 }
1024 } 1025 }
1025out: 1026out:
1027 rcu_read_unlock();
1026 cb->args[1] = idx; 1028 cb->args[1] = idx;
1027 cb->args[0] = h; 1029 cb->args[0] = h;
1028 1030
@@ -1570,12 +1572,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1570 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1572 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1571 dev->real_num_tx_queues = real_num_queues; 1573 dev->real_num_tx_queues = real_num_queues;
1572 1574
1573 if (strchr(dev->name, '%')) {
1574 err = dev_alloc_name(dev, dev->name);
1575 if (err < 0)
1576 goto err_free;
1577 }
1578
1579 if (tb[IFLA_MTU]) 1575 if (tb[IFLA_MTU])
1580 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1576 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1581 if (tb[IFLA_ADDRESS]) 1577 if (tb[IFLA_ADDRESS])
@@ -1595,8 +1591,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1595 1591
1596 return dev; 1592 return dev;
1597 1593
1598err_free:
1599 free_netdev(dev);
1600err: 1594err:
1601 return ERR_PTR(err); 1595 return ERR_PTR(err);
1602} 1596}
@@ -1879,7 +1873,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1879 int min_len; 1873 int min_len;
1880 int family; 1874 int family;
1881 int type; 1875 int type;
1882 int err;
1883 1876
1884 type = nlh->nlmsg_type; 1877 type = nlh->nlmsg_type;
1885 if (type > RTM_MAX) 1878 if (type > RTM_MAX)
@@ -1906,11 +1899,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1906 if (dumpit == NULL) 1899 if (dumpit == NULL)
1907 return -EOPNOTSUPP; 1900 return -EOPNOTSUPP;
1908 1901
1909 __rtnl_unlock();
1910 rtnl = net->rtnl; 1902 rtnl = net->rtnl;
1911 err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); 1903 return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
1912 rtnl_lock();
1913 return err;
1914 } 1904 }
1915 1905
1916 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); 1906 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1980,7 +1970,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
1980{ 1970{
1981 struct sock *sk; 1971 struct sock *sk;
1982 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1972 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
1983 rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); 1973 rtnetlink_rcv, NULL, THIS_MODULE);
1984 if (!sk) 1974 if (!sk)
1985 return -ENOMEM; 1975 return -ENOMEM;
1986 net->rtnl = sk; 1976 net->rtnl = sk;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 385b6095fdc4..a829e3f60aeb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -122,6 +122,15 @@ static struct ctl_table net_core_table[] = {
122 .mode = 0644, 122 .mode = 0644,
123 .proc_handler = proc_dointvec 123 .proc_handler = proc_dointvec
124 }, 124 },
125#ifdef CONFIG_BPF_JIT
126 {
127 .procname = "bpf_jit_enable",
128 .data = &bpf_jit_enable,
129 .maxlen = sizeof(int),
130 .mode = 0644,
131 .proc_handler = proc_dointvec
132 },
133#endif
125 { 134 {
126 .procname = "netdev_tstamp_prequeue", 135 .procname = "netdev_tstamp_prequeue",
127 .data = &netdev_tstamp_prequeue, 136 .data = &netdev_tstamp_prequeue,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae451c6d83ba..36700a46b245 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -40,13 +40,15 @@
40 40
41int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 41int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
42{ 42{
43 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
43 struct inet_sock *inet = inet_sk(sk); 44 struct inet_sock *inet = inet_sk(sk);
44 struct dccp_sock *dp = dccp_sk(sk); 45 struct dccp_sock *dp = dccp_sk(sk);
45 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
46 __be16 orig_sport, orig_dport; 46 __be16 orig_sport, orig_dport;
47 struct rtable *rt;
48 __be32 daddr, nexthop; 47 __be32 daddr, nexthop;
48 struct flowi4 fl4;
49 struct rtable *rt;
49 int err; 50 int err;
51 struct ip_options_rcu *inet_opt;
50 52
51 dp->dccps_role = DCCP_ROLE_CLIENT; 53 dp->dccps_role = DCCP_ROLE_CLIENT;
52 54
@@ -57,15 +59,18 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
57 return -EAFNOSUPPORT; 59 return -EAFNOSUPPORT;
58 60
59 nexthop = daddr = usin->sin_addr.s_addr; 61 nexthop = daddr = usin->sin_addr.s_addr;
60 if (inet->opt != NULL && inet->opt->srr) { 62
63 inet_opt = rcu_dereference_protected(inet->inet_opt,
64 sock_owned_by_user(sk));
65 if (inet_opt != NULL && inet_opt->opt.srr) {
61 if (daddr == 0) 66 if (daddr == 0)
62 return -EINVAL; 67 return -EINVAL;
63 nexthop = inet->opt->faddr; 68 nexthop = inet_opt->opt.faddr;
64 } 69 }
65 70
66 orig_sport = inet->inet_sport; 71 orig_sport = inet->inet_sport;
67 orig_dport = usin->sin_port; 72 orig_dport = usin->sin_port;
68 rt = ip_route_connect(nexthop, inet->inet_saddr, 73 rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr,
69 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 74 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
70 IPPROTO_DCCP, 75 IPPROTO_DCCP,
71 orig_sport, orig_dport, sk, true); 76 orig_sport, orig_dport, sk, true);
@@ -77,19 +82,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
77 return -ENETUNREACH; 82 return -ENETUNREACH;
78 } 83 }
79 84
80 if (inet->opt == NULL || !inet->opt->srr) 85 if (inet_opt == NULL || !inet_opt->opt.srr)
81 daddr = rt->rt_dst; 86 daddr = fl4.daddr;
82 87
83 if (inet->inet_saddr == 0) 88 if (inet->inet_saddr == 0)
84 inet->inet_saddr = rt->rt_src; 89 inet->inet_saddr = fl4.saddr;
85 inet->inet_rcv_saddr = inet->inet_saddr; 90 inet->inet_rcv_saddr = inet->inet_saddr;
86 91
87 inet->inet_dport = usin->sin_port; 92 inet->inet_dport = usin->sin_port;
88 inet->inet_daddr = daddr; 93 inet->inet_daddr = daddr;
89 94
90 inet_csk(sk)->icsk_ext_hdr_len = 0; 95 inet_csk(sk)->icsk_ext_hdr_len = 0;
91 if (inet->opt != NULL) 96 if (inet_opt)
92 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 97 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
93 /* 98 /*
94 * Socket identity is still unknown (sport may be zero). 99 * Socket identity is still unknown (sport may be zero).
95 * However we set state to DCCP_REQUESTING and not releasing socket 100 * However we set state to DCCP_REQUESTING and not releasing socket
@@ -101,8 +106,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
101 if (err != 0) 106 if (err != 0)
102 goto failure; 107 goto failure;
103 108
104 rt = ip_route_newports(rt, IPPROTO_DCCP, 109 rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
105 orig_sport, orig_dport,
106 inet->inet_sport, inet->inet_dport, sk); 110 inet->inet_sport, inet->inet_dport, sk);
107 if (IS_ERR(rt)) { 111 if (IS_ERR(rt)) {
108 rt = NULL; 112 rt = NULL;
@@ -405,7 +409,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
405 newinet->inet_daddr = ireq->rmt_addr; 409 newinet->inet_daddr = ireq->rmt_addr;
406 newinet->inet_rcv_saddr = ireq->loc_addr; 410 newinet->inet_rcv_saddr = ireq->loc_addr;
407 newinet->inet_saddr = ireq->loc_addr; 411 newinet->inet_saddr = ireq->loc_addr;
408 newinet->opt = ireq->opt; 412 newinet->inet_opt = ireq->opt;
409 ireq->opt = NULL; 413 ireq->opt = NULL;
410 newinet->mc_index = inet_iif(skb); 414 newinet->mc_index = inet_iif(skb);
411 newinet->mc_ttl = ip_hdr(skb)->ttl; 415 newinet->mc_ttl = ip_hdr(skb)->ttl;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index de1b7e37ad5b..8dc4348774a5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -54,8 +54,8 @@ static void dccp_v6_hash(struct sock *sk)
54 54
55/* add pseudo-header to DCCP checksum stored in skb->csum */ 55/* add pseudo-header to DCCP checksum stored in skb->csum */
56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, 56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
57 struct in6_addr *saddr, 57 const struct in6_addr *saddr,
58 struct in6_addr *daddr) 58 const struct in6_addr *daddr)
59{ 59{
60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); 60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
61} 61}
@@ -87,7 +87,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88 u8 type, u8 code, int offset, __be32 info) 88 u8 type, u8 code, int offset, __be32 info)
89{ 89{
90 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data; 90 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
92 struct dccp_sock *dp; 92 struct dccp_sock *dp;
93 struct ipv6_pinfo *np; 93 struct ipv6_pinfo *np;
@@ -296,7 +296,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
296 296
297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) 297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
298{ 298{
299 struct ipv6hdr *rxip6h; 299 const struct ipv6hdr *rxip6h;
300 struct sk_buff *skb; 300 struct sk_buff *skb;
301 struct flowi6 fl6; 301 struct flowi6 fl6;
302 struct net *net = dev_net(skb_dst(rxskb)->dev); 302 struct net *net = dev_net(skb_dst(rxskb)->dev);
@@ -573,7 +573,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
573 573
574 First: no IPv4 options. 574 First: no IPv4 options.
575 */ 575 */
576 newinet->opt = NULL; 576 newinet->inet_opt = NULL;
577 577
578 /* Clone RX bits */ 578 /* Clone RX bits */
579 newnp->rxopt.all = np->rxopt.all; 579 newnp->rxopt.all = np->rxopt.all;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0dcaa903e00e..404fa1591027 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -752,7 +752,8 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
752 skip_naddr = cb->args[1]; 752 skip_naddr = cb->args[1];
753 753
754 idx = 0; 754 idx = 0;
755 for_each_netdev(&init_net, dev) { 755 rcu_read_lock();
756 for_each_netdev_rcu(&init_net, dev) {
756 if (idx < skip_ndevs) 757 if (idx < skip_ndevs)
757 goto cont; 758 goto cont;
758 else if (idx > skip_ndevs) { 759 else if (idx > skip_ndevs) {
@@ -761,11 +762,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
761 skip_naddr = 0; 762 skip_naddr = 0;
762 } 763 }
763 764
764 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) 765 if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL)
765 goto cont; 766 goto cont;
766 767
767 for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa; 768 for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
768 ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) { 769 ifa = rcu_dereference(ifa->ifa_next), dn_idx++) {
769 if (dn_idx < skip_naddr) 770 if (dn_idx < skip_naddr)
770 continue; 771 continue;
771 772
@@ -778,6 +779,7 @@ cont:
778 idx++; 779 idx++;
779 } 780 }
780done: 781done:
782 rcu_read_unlock();
781 cb->args[0] = idx; 783 cb->args[0] = idx;
782 cb->args[1] = dn_idx; 784 cb->args[1] = dn_idx;
783 785
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9f09d4fc2880..74544bc6fdec 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1125,13 +1125,11 @@ make_route:
1125 if (dev_out->flags & IFF_LOOPBACK) 1125 if (dev_out->flags & IFF_LOOPBACK)
1126 flags |= RTCF_LOCAL; 1126 flags |= RTCF_LOCAL;
1127 1127
1128 rt = dst_alloc(&dn_dst_ops, 0); 1128 rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
1129 if (rt == NULL) 1129 if (rt == NULL)
1130 goto e_nobufs; 1130 goto e_nobufs;
1131 1131
1132 atomic_set(&rt->dst.__refcnt, 1); 1132 memset(&rt->fld, 0, sizeof(rt->fld));
1133 rt->dst.flags = DST_HOST;
1134
1135 rt->fld.saddr = oldflp->saddr; 1133 rt->fld.saddr = oldflp->saddr;
1136 rt->fld.daddr = oldflp->daddr; 1134 rt->fld.daddr = oldflp->daddr;
1137 rt->fld.flowidn_oif = oldflp->flowidn_oif; 1135 rt->fld.flowidn_oif = oldflp->flowidn_oif;
@@ -1146,8 +1144,6 @@ make_route:
1146 rt->rt_dst_map = fld.daddr; 1144 rt->rt_dst_map = fld.daddr;
1147 rt->rt_src_map = fld.saddr; 1145 rt->rt_src_map = fld.saddr;
1148 1146
1149 rt->dst.dev = dev_out;
1150 dev_hold(dev_out);
1151 rt->dst.neighbour = neigh; 1147 rt->dst.neighbour = neigh;
1152 neigh = NULL; 1148 neigh = NULL;
1153 1149
@@ -1399,10 +1395,11 @@ static int dn_route_input_slow(struct sk_buff *skb)
1399 } 1395 }
1400 1396
1401make_route: 1397make_route:
1402 rt = dst_alloc(&dn_dst_ops, 0); 1398 rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
1403 if (rt == NULL) 1399 if (rt == NULL)
1404 goto e_nobufs; 1400 goto e_nobufs;
1405 1401
1402 memset(&rt->fld, 0, sizeof(rt->fld));
1406 rt->rt_saddr = fld.saddr; 1403 rt->rt_saddr = fld.saddr;
1407 rt->rt_daddr = fld.daddr; 1404 rt->rt_daddr = fld.daddr;
1408 rt->rt_gateway = fld.daddr; 1405 rt->rt_gateway = fld.daddr;
@@ -1419,9 +1416,7 @@ make_route:
1419 rt->fld.flowidn_iif = in_dev->ifindex; 1416 rt->fld.flowidn_iif = in_dev->ifindex;
1420 rt->fld.flowidn_mark = fld.flowidn_mark; 1417 rt->fld.flowidn_mark = fld.flowidn_mark;
1421 1418
1422 rt->dst.flags = DST_HOST;
1423 rt->dst.neighbour = neigh; 1419 rt->dst.neighbour = neigh;
1424 rt->dst.dev = out_dev;
1425 rt->dst.lastuse = jiffies; 1420 rt->dst.lastuse = jiffies;
1426 rt->dst.output = dn_rt_bug; 1421 rt->dst.output = dn_rt_bug;
1427 switch(res.type) { 1422 switch(res.type) {
@@ -1440,8 +1435,6 @@ make_route:
1440 rt->dst.input = dst_discard; 1435 rt->dst.input = dst_discard;
1441 } 1436 }
1442 rt->rt_flags = flags; 1437 rt->rt_flags = flags;
1443 if (rt->dst.dev)
1444 dev_hold(rt->dst.dev);
1445 1438
1446 err = dn_rt_set_next_hop(rt, &res); 1439 err = dn_rt_set_next_hop(rt, &res);
1447 if (err) 1440 if (err)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 99d8d3a40998..bd0a52dd1d40 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -123,11 +123,11 @@ static inline void dn_rebuild_zone(struct dn_zone *dz,
123 struct dn_fib_node **old_ht, 123 struct dn_fib_node **old_ht,
124 int old_divisor) 124 int old_divisor)
125{ 125{
126 int i;
127 struct dn_fib_node *f, **fp, *next; 126 struct dn_fib_node *f, **fp, *next;
127 int i;
128 128
129 for(i = 0; i < old_divisor; i++) { 129 for(i = 0; i < old_divisor; i++) {
130 for(f = old_ht[i]; f; f = f->fn_next) { 130 for(f = old_ht[i]; f; f = next) {
131 next = f->fn_next; 131 next = f->fn_next;
132 for(fp = dn_chain_p(f->fn_key, dz); 132 for(fp = dn_chain_p(f->fn_key, dz);
133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key); 133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 64ca2a6fa0d4..0a47b6c37038 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -288,7 +288,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
288 .get_drvinfo = dsa_slave_get_drvinfo, 288 .get_drvinfo = dsa_slave_get_drvinfo,
289 .nway_reset = dsa_slave_nway_reset, 289 .nway_reset = dsa_slave_nway_reset,
290 .get_link = dsa_slave_get_link, 290 .get_link = dsa_slave_get_link,
291 .set_sg = ethtool_op_set_sg,
292 .get_strings = dsa_slave_get_strings, 291 .get_strings = dsa_slave_get_strings,
293 .get_ethtool_stats = dsa_slave_get_ethtool_stats, 292 .get_ethtool_stats = dsa_slave_get_ethtool_stats,
294 .get_sset_count = dsa_slave_get_sset_count, 293 .get_sset_count = dsa_slave_get_sset_count,
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 116d3fd3d669..a1d9f3787dd5 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -935,7 +935,6 @@ static void aun_data_available(struct sock *sk, int slen)
935 struct sk_buff *skb; 935 struct sk_buff *skb;
936 unsigned char *data; 936 unsigned char *data;
937 struct aunhdr *ah; 937 struct aunhdr *ah;
938 struct iphdr *ip;
939 size_t len; 938 size_t len;
940 939
941 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) { 940 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
@@ -949,7 +948,6 @@ static void aun_data_available(struct sock *sk, int slen)
949 data = skb_transport_header(skb) + sizeof(struct udphdr); 948 data = skb_transport_header(skb) + sizeof(struct udphdr);
950 ah = (struct aunhdr *)data; 949 ah = (struct aunhdr *)data;
951 len = skb->len - sizeof(struct udphdr); 950 len = skb->len - sizeof(struct udphdr);
952 ip = ip_hdr(skb);
953 951
954 switch (ah->code) 952 switch (ah->code)
955 { 953 {
@@ -962,12 +960,6 @@ static void aun_data_available(struct sock *sk, int slen)
962 case 4: 960 case 4:
963 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING); 961 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
964 break; 962 break;
965#if 0
966 /* This isn't quite right yet. */
967 case 5:
968 aun_send_response(ip->saddr, ah->handle, 6, ah->cb);
969 break;
970#endif
971 default: 963 default:
972 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]); 964 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
973 } 965 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 807d83c02ef6..7b91fa8bf83c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -153,7 +153,7 @@ void inet_sock_destruct(struct sock *sk)
153 WARN_ON(sk->sk_wmem_queued); 153 WARN_ON(sk->sk_wmem_queued);
154 WARN_ON(sk->sk_forward_alloc); 154 WARN_ON(sk->sk_forward_alloc);
155 155
156 kfree(inet->opt); 156 kfree(rcu_dereference_protected(inet->inet_opt, 1));
157 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); 157 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
158 sk_refcnt_debug_dec(sk); 158 sk_refcnt_debug_dec(sk);
159} 159}
@@ -1103,14 +1103,18 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1103 struct inet_sock *inet = inet_sk(sk); 1103 struct inet_sock *inet = inet_sk(sk);
1104 __be32 old_saddr = inet->inet_saddr; 1104 __be32 old_saddr = inet->inet_saddr;
1105 __be32 daddr = inet->inet_daddr; 1105 __be32 daddr = inet->inet_daddr;
1106 struct flowi4 fl4;
1106 struct rtable *rt; 1107 struct rtable *rt;
1107 __be32 new_saddr; 1108 __be32 new_saddr;
1109 struct ip_options_rcu *inet_opt;
1108 1110
1109 if (inet->opt && inet->opt->srr) 1111 inet_opt = rcu_dereference_protected(inet->inet_opt,
1110 daddr = inet->opt->faddr; 1112 sock_owned_by_user(sk));
1113 if (inet_opt && inet_opt->opt.srr)
1114 daddr = inet_opt->opt.faddr;
1111 1115
1112 /* Query new route. */ 1116 /* Query new route. */
1113 rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk), 1117 rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk),
1114 sk->sk_bound_dev_if, sk->sk_protocol, 1118 sk->sk_bound_dev_if, sk->sk_protocol,
1115 inet->inet_sport, inet->inet_dport, sk, false); 1119 inet->inet_sport, inet->inet_dport, sk, false);
1116 if (IS_ERR(rt)) 1120 if (IS_ERR(rt))
@@ -1118,7 +1122,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1118 1122
1119 sk_setup_caps(sk, &rt->dst); 1123 sk_setup_caps(sk, &rt->dst);
1120 1124
1121 new_saddr = rt->rt_src; 1125 new_saddr = fl4.saddr;
1122 1126
1123 if (new_saddr == old_saddr) 1127 if (new_saddr == old_saddr)
1124 return 0; 1128 return 0;
@@ -1147,6 +1151,8 @@ int inet_sk_rebuild_header(struct sock *sk)
1147 struct inet_sock *inet = inet_sk(sk); 1151 struct inet_sock *inet = inet_sk(sk);
1148 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); 1152 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1149 __be32 daddr; 1153 __be32 daddr;
1154 struct ip_options_rcu *inet_opt;
1155 struct flowi4 fl4;
1150 int err; 1156 int err;
1151 1157
1152 /* Route is OK, nothing to do. */ 1158 /* Route is OK, nothing to do. */
@@ -1154,10 +1160,13 @@ int inet_sk_rebuild_header(struct sock *sk)
1154 return 0; 1160 return 0;
1155 1161
1156 /* Reroute. */ 1162 /* Reroute. */
1163 rcu_read_lock();
1164 inet_opt = rcu_dereference(inet->inet_opt);
1157 daddr = inet->inet_daddr; 1165 daddr = inet->inet_daddr;
1158 if (inet->opt && inet->opt->srr) 1166 if (inet_opt && inet_opt->opt.srr)
1159 daddr = inet->opt->faddr; 1167 daddr = inet_opt->opt.faddr;
1160 rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, 1168 rcu_read_unlock();
1169 rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr, inet->inet_saddr,
1161 inet->inet_dport, inet->inet_sport, 1170 inet->inet_dport, inet->inet_sport,
1162 sk->sk_protocol, RT_CONN_FLAGS(sk), 1171 sk->sk_protocol, RT_CONN_FLAGS(sk),
1163 sk->sk_bound_dev_if); 1172 sk->sk_bound_dev_if);
@@ -1186,7 +1195,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1186 1195
1187static int inet_gso_send_check(struct sk_buff *skb) 1196static int inet_gso_send_check(struct sk_buff *skb)
1188{ 1197{
1189 struct iphdr *iph; 1198 const struct iphdr *iph;
1190 const struct net_protocol *ops; 1199 const struct net_protocol *ops;
1191 int proto; 1200 int proto;
1192 int ihl; 1201 int ihl;
@@ -1293,7 +1302,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1293 const struct net_protocol *ops; 1302 const struct net_protocol *ops;
1294 struct sk_buff **pp = NULL; 1303 struct sk_buff **pp = NULL;
1295 struct sk_buff *p; 1304 struct sk_buff *p;
1296 struct iphdr *iph; 1305 const struct iphdr *iph;
1297 unsigned int hlen; 1306 unsigned int hlen;
1298 unsigned int off; 1307 unsigned int off;
1299 unsigned int id; 1308 unsigned int id;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4286fd3cc0e2..c1f4154552fc 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -73,7 +73,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
73 * into IP header for icv calculation. Options are already checked 73 * into IP header for icv calculation. Options are already checked
74 * for validity, so paranoia is not required. */ 74 * for validity, so paranoia is not required. */
75 75
76static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) 76static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
77{ 77{
78 unsigned char * optptr = (unsigned char*)(iph+1); 78 unsigned char * optptr = (unsigned char*)(iph+1);
79 int l = iph->ihl*4 - sizeof(struct iphdr); 79 int l = iph->ihl*4 - sizeof(struct iphdr);
@@ -396,7 +396,7 @@ out:
396static void ah4_err(struct sk_buff *skb, u32 info) 396static void ah4_err(struct sk_buff *skb, u32 info)
397{ 397{
398 struct net *net = dev_net(skb->dev); 398 struct net *net = dev_net(skb->dev);
399 struct iphdr *iph = (struct iphdr *)skb->data; 399 const struct iphdr *iph = (const struct iphdr *)skb->data;
400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); 400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
401 struct xfrm_state *x; 401 struct xfrm_state *x;
402 402
@@ -404,7 +404,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
405 return; 405 return;
406 406
407 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 407 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
408 ah->spi, IPPROTO_AH, AF_INET);
408 if (!x) 409 if (!x)
409 return; 410 return;
410 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 411 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a0af7ea87870..2b3c23c287cd 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1857,6 +1857,11 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1857 return CIPSO_V4_HDR_LEN + ret_val; 1857 return CIPSO_V4_HDR_LEN + ret_val;
1858} 1858}
1859 1859
1860static void opt_kfree_rcu(struct rcu_head *head)
1861{
1862 kfree(container_of(head, struct ip_options_rcu, rcu));
1863}
1864
1860/** 1865/**
1861 * cipso_v4_sock_setattr - Add a CIPSO option to a socket 1866 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
1862 * @sk: the socket 1867 * @sk: the socket
@@ -1879,7 +1884,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1879 unsigned char *buf = NULL; 1884 unsigned char *buf = NULL;
1880 u32 buf_len; 1885 u32 buf_len;
1881 u32 opt_len; 1886 u32 opt_len;
1882 struct ip_options *opt = NULL; 1887 struct ip_options_rcu *old, *opt = NULL;
1883 struct inet_sock *sk_inet; 1888 struct inet_sock *sk_inet;
1884 struct inet_connection_sock *sk_conn; 1889 struct inet_connection_sock *sk_conn;
1885 1890
@@ -1915,22 +1920,25 @@ int cipso_v4_sock_setattr(struct sock *sk,
1915 ret_val = -ENOMEM; 1920 ret_val = -ENOMEM;
1916 goto socket_setattr_failure; 1921 goto socket_setattr_failure;
1917 } 1922 }
1918 memcpy(opt->__data, buf, buf_len); 1923 memcpy(opt->opt.__data, buf, buf_len);
1919 opt->optlen = opt_len; 1924 opt->opt.optlen = opt_len;
1920 opt->cipso = sizeof(struct iphdr); 1925 opt->opt.cipso = sizeof(struct iphdr);
1921 kfree(buf); 1926 kfree(buf);
1922 buf = NULL; 1927 buf = NULL;
1923 1928
1924 sk_inet = inet_sk(sk); 1929 sk_inet = inet_sk(sk);
1930
1931 old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
1925 if (sk_inet->is_icsk) { 1932 if (sk_inet->is_icsk) {
1926 sk_conn = inet_csk(sk); 1933 sk_conn = inet_csk(sk);
1927 if (sk_inet->opt) 1934 if (old)
1928 sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; 1935 sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
1929 sk_conn->icsk_ext_hdr_len += opt->optlen; 1936 sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
1930 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); 1937 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
1931 } 1938 }
1932 opt = xchg(&sk_inet->opt, opt); 1939 rcu_assign_pointer(sk_inet->inet_opt, opt);
1933 kfree(opt); 1940 if (old)
1941 call_rcu(&old->rcu, opt_kfree_rcu);
1934 1942
1935 return 0; 1943 return 0;
1936 1944
@@ -1960,7 +1968,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1960 unsigned char *buf = NULL; 1968 unsigned char *buf = NULL;
1961 u32 buf_len; 1969 u32 buf_len;
1962 u32 opt_len; 1970 u32 opt_len;
1963 struct ip_options *opt = NULL; 1971 struct ip_options_rcu *opt = NULL;
1964 struct inet_request_sock *req_inet; 1972 struct inet_request_sock *req_inet;
1965 1973
1966 /* We allocate the maximum CIPSO option size here so we are probably 1974 /* We allocate the maximum CIPSO option size here so we are probably
@@ -1988,15 +1996,16 @@ int cipso_v4_req_setattr(struct request_sock *req,
1988 ret_val = -ENOMEM; 1996 ret_val = -ENOMEM;
1989 goto req_setattr_failure; 1997 goto req_setattr_failure;
1990 } 1998 }
1991 memcpy(opt->__data, buf, buf_len); 1999 memcpy(opt->opt.__data, buf, buf_len);
1992 opt->optlen = opt_len; 2000 opt->opt.optlen = opt_len;
1993 opt->cipso = sizeof(struct iphdr); 2001 opt->opt.cipso = sizeof(struct iphdr);
1994 kfree(buf); 2002 kfree(buf);
1995 buf = NULL; 2003 buf = NULL;
1996 2004
1997 req_inet = inet_rsk(req); 2005 req_inet = inet_rsk(req);
1998 opt = xchg(&req_inet->opt, opt); 2006 opt = xchg(&req_inet->opt, opt);
1999 kfree(opt); 2007 if (opt)
2008 call_rcu(&opt->rcu, opt_kfree_rcu);
2000 2009
2001 return 0; 2010 return 0;
2002 2011
@@ -2016,34 +2025,34 @@ req_setattr_failure:
2016 * values on failure. 2025 * values on failure.
2017 * 2026 *
2018 */ 2027 */
2019static int cipso_v4_delopt(struct ip_options **opt_ptr) 2028static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2020{ 2029{
2021 int hdr_delta = 0; 2030 int hdr_delta = 0;
2022 struct ip_options *opt = *opt_ptr; 2031 struct ip_options_rcu *opt = *opt_ptr;
2023 2032
2024 if (opt->srr || opt->rr || opt->ts || opt->router_alert) { 2033 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
2025 u8 cipso_len; 2034 u8 cipso_len;
2026 u8 cipso_off; 2035 u8 cipso_off;
2027 unsigned char *cipso_ptr; 2036 unsigned char *cipso_ptr;
2028 int iter; 2037 int iter;
2029 int optlen_new; 2038 int optlen_new;
2030 2039
2031 cipso_off = opt->cipso - sizeof(struct iphdr); 2040 cipso_off = opt->opt.cipso - sizeof(struct iphdr);
2032 cipso_ptr = &opt->__data[cipso_off]; 2041 cipso_ptr = &opt->opt.__data[cipso_off];
2033 cipso_len = cipso_ptr[1]; 2042 cipso_len = cipso_ptr[1];
2034 2043
2035 if (opt->srr > opt->cipso) 2044 if (opt->opt.srr > opt->opt.cipso)
2036 opt->srr -= cipso_len; 2045 opt->opt.srr -= cipso_len;
2037 if (opt->rr > opt->cipso) 2046 if (opt->opt.rr > opt->opt.cipso)
2038 opt->rr -= cipso_len; 2047 opt->opt.rr -= cipso_len;
2039 if (opt->ts > opt->cipso) 2048 if (opt->opt.ts > opt->opt.cipso)
2040 opt->ts -= cipso_len; 2049 opt->opt.ts -= cipso_len;
2041 if (opt->router_alert > opt->cipso) 2050 if (opt->opt.router_alert > opt->opt.cipso)
2042 opt->router_alert -= cipso_len; 2051 opt->opt.router_alert -= cipso_len;
2043 opt->cipso = 0; 2052 opt->opt.cipso = 0;
2044 2053
2045 memmove(cipso_ptr, cipso_ptr + cipso_len, 2054 memmove(cipso_ptr, cipso_ptr + cipso_len,
2046 opt->optlen - cipso_off - cipso_len); 2055 opt->opt.optlen - cipso_off - cipso_len);
2047 2056
2048 /* determining the new total option length is tricky because of 2057 /* determining the new total option length is tricky because of
2049 * the padding necessary, the only thing i can think to do at 2058 * the padding necessary, the only thing i can think to do at
@@ -2052,21 +2061,21 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
2052 * from there we can determine the new total option length */ 2061 * from there we can determine the new total option length */
2053 iter = 0; 2062 iter = 0;
2054 optlen_new = 0; 2063 optlen_new = 0;
2055 while (iter < opt->optlen) 2064 while (iter < opt->opt.optlen)
2056 if (opt->__data[iter] != IPOPT_NOP) { 2065 if (opt->opt.__data[iter] != IPOPT_NOP) {
2057 iter += opt->__data[iter + 1]; 2066 iter += opt->opt.__data[iter + 1];
2058 optlen_new = iter; 2067 optlen_new = iter;
2059 } else 2068 } else
2060 iter++; 2069 iter++;
2061 hdr_delta = opt->optlen; 2070 hdr_delta = opt->opt.optlen;
2062 opt->optlen = (optlen_new + 3) & ~3; 2071 opt->opt.optlen = (optlen_new + 3) & ~3;
2063 hdr_delta -= opt->optlen; 2072 hdr_delta -= opt->opt.optlen;
2064 } else { 2073 } else {
2065 /* only the cipso option was present on the socket so we can 2074 /* only the cipso option was present on the socket so we can
2066 * remove the entire option struct */ 2075 * remove the entire option struct */
2067 *opt_ptr = NULL; 2076 *opt_ptr = NULL;
2068 hdr_delta = opt->optlen; 2077 hdr_delta = opt->opt.optlen;
2069 kfree(opt); 2078 call_rcu(&opt->rcu, opt_kfree_rcu);
2070 } 2079 }
2071 2080
2072 return hdr_delta; 2081 return hdr_delta;
@@ -2083,15 +2092,15 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
2083void cipso_v4_sock_delattr(struct sock *sk) 2092void cipso_v4_sock_delattr(struct sock *sk)
2084{ 2093{
2085 int hdr_delta; 2094 int hdr_delta;
2086 struct ip_options *opt; 2095 struct ip_options_rcu *opt;
2087 struct inet_sock *sk_inet; 2096 struct inet_sock *sk_inet;
2088 2097
2089 sk_inet = inet_sk(sk); 2098 sk_inet = inet_sk(sk);
2090 opt = sk_inet->opt; 2099 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2091 if (opt == NULL || opt->cipso == 0) 2100 if (opt == NULL || opt->opt.cipso == 0)
2092 return; 2101 return;
2093 2102
2094 hdr_delta = cipso_v4_delopt(&sk_inet->opt); 2103 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2095 if (sk_inet->is_icsk && hdr_delta > 0) { 2104 if (sk_inet->is_icsk && hdr_delta > 0) {
2096 struct inet_connection_sock *sk_conn = inet_csk(sk); 2105 struct inet_connection_sock *sk_conn = inet_csk(sk);
2097 sk_conn->icsk_ext_hdr_len -= hdr_delta; 2106 sk_conn->icsk_ext_hdr_len -= hdr_delta;
@@ -2109,12 +2118,12 @@ void cipso_v4_sock_delattr(struct sock *sk)
2109 */ 2118 */
2110void cipso_v4_req_delattr(struct request_sock *req) 2119void cipso_v4_req_delattr(struct request_sock *req)
2111{ 2120{
2112 struct ip_options *opt; 2121 struct ip_options_rcu *opt;
2113 struct inet_request_sock *req_inet; 2122 struct inet_request_sock *req_inet;
2114 2123
2115 req_inet = inet_rsk(req); 2124 req_inet = inet_rsk(req);
2116 opt = req_inet->opt; 2125 opt = req_inet->opt;
2117 if (opt == NULL || opt->cipso == 0) 2126 if (opt == NULL || opt->opt.cipso == 0)
2118 return; 2127 return;
2119 2128
2120 cipso_v4_delopt(&req_inet->opt); 2129 cipso_v4_delopt(&req_inet->opt);
@@ -2184,14 +2193,18 @@ getattr_return:
2184 */ 2193 */
2185int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) 2194int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2186{ 2195{
2187 struct ip_options *opt; 2196 struct ip_options_rcu *opt;
2197 int res = -ENOMSG;
2188 2198
2189 opt = inet_sk(sk)->opt; 2199 rcu_read_lock();
2190 if (opt == NULL || opt->cipso == 0) 2200 opt = rcu_dereference(inet_sk(sk)->inet_opt);
2191 return -ENOMSG; 2201 if (opt && opt->opt.cipso)
2192 2202 res = cipso_v4_getattr(opt->opt.__data +
2193 return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), 2203 opt->opt.cipso -
2194 secattr); 2204 sizeof(struct iphdr),
2205 secattr);
2206 rcu_read_unlock();
2207 return res;
2195} 2208}
2196 2209
2197/** 2210/**
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 85bd24ca4f6d..d5a2e6995bae 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -24,6 +24,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
27 struct flowi4 fl4;
27 struct rtable *rt; 28 struct rtable *rt;
28 __be32 saddr; 29 __be32 saddr;
29 int oif; 30 int oif;
@@ -46,7 +47,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
46 if (!saddr) 47 if (!saddr)
47 saddr = inet->mc_addr; 48 saddr = inet->mc_addr;
48 } 49 }
49 rt = ip_route_connect(usin->sin_addr.s_addr, saddr, 50 rt = ip_route_connect(&fl4, usin->sin_addr.s_addr, saddr,
50 RT_CONN_FLAGS(sk), oif, 51 RT_CONN_FLAGS(sk), oif,
51 sk->sk_protocol, 52 sk->sk_protocol,
52 inet->inet_sport, usin->sin_port, sk, true); 53 inet->inet_sport, usin->sin_port, sk, true);
@@ -62,13 +63,13 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
62 return -EACCES; 63 return -EACCES;
63 } 64 }
64 if (!inet->inet_saddr) 65 if (!inet->inet_saddr)
65 inet->inet_saddr = rt->rt_src; /* Update source address */ 66 inet->inet_saddr = fl4.saddr; /* Update source address */
66 if (!inet->inet_rcv_saddr) { 67 if (!inet->inet_rcv_saddr) {
67 inet->inet_rcv_saddr = rt->rt_src; 68 inet->inet_rcv_saddr = fl4.saddr;
68 if (sk->sk_prot->rehash) 69 if (sk->sk_prot->rehash)
69 sk->sk_prot->rehash(sk); 70 sk->sk_prot->rehash(sk);
70 } 71 }
71 inet->inet_daddr = rt->rt_dst; 72 inet->inet_daddr = fl4.daddr;
72 inet->inet_dport = usin->sin_port; 73 inet->inet_dport = usin->sin_port;
73 sk->sk_state = TCP_ESTABLISHED; 74 sk->sk_state = TCP_ESTABLISHED;
74 inet->inet_id = jiffies; 75 inet->inet_id = jiffies;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 03f994bcf7de..a5b413416da3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -276,7 +276,7 @@ error:
276 276
277static int esp_input_done2(struct sk_buff *skb, int err) 277static int esp_input_done2(struct sk_buff *skb, int err)
278{ 278{
279 struct iphdr *iph; 279 const struct iphdr *iph;
280 struct xfrm_state *x = xfrm_input_state(skb); 280 struct xfrm_state *x = xfrm_input_state(skb);
281 struct esp_data *esp = x->data; 281 struct esp_data *esp = x->data;
282 struct crypto_aead *aead = esp->aead; 282 struct crypto_aead *aead = esp->aead;
@@ -484,7 +484,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
484static void esp4_err(struct sk_buff *skb, u32 info) 484static void esp4_err(struct sk_buff *skb, u32 info)
485{ 485{
486 struct net *net = dev_net(skb->dev); 486 struct net *net = dev_net(skb->dev);
487 struct iphdr *iph = (struct iphdr *)skb->data; 487 const struct iphdr *iph = (const struct iphdr *)skb->data;
488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
489 struct xfrm_state *x; 489 struct xfrm_state *x;
490 490
@@ -492,7 +492,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
493 return; 493 return;
494 494
495 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 495 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
496 esph->spi, IPPROTO_ESP, AF_INET);
496 if (!x) 497 if (!x)
497 return; 498 return;
498 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 499 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 451088330bbb..22524716fe70 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -44,6 +44,7 @@
44#include <net/arp.h> 44#include <net/arp.h>
45#include <net/ip_fib.h> 45#include <net/ip_fib.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/xfrm.h>
47 48
48#ifndef CONFIG_IP_MULTIPLE_TABLES 49#ifndef CONFIG_IP_MULTIPLE_TABLES
49 50
@@ -188,9 +189,9 @@ EXPORT_SYMBOL(inet_dev_addr_type);
188 * - check, that packet arrived from expected physical interface. 189 * - check, that packet arrived from expected physical interface.
189 * called with rcu_read_lock() 190 * called with rcu_read_lock()
190 */ 191 */
191int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, 192int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
192 struct net_device *dev, __be32 *spec_dst, 193 int oif, struct net_device *dev, __be32 *spec_dst,
193 u32 *itag, u32 mark) 194 u32 *itag)
194{ 195{
195 struct in_device *in_dev; 196 struct in_device *in_dev;
196 struct flowi4 fl4; 197 struct flowi4 fl4;
@@ -202,7 +203,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
202 203
203 fl4.flowi4_oif = 0; 204 fl4.flowi4_oif = 0;
204 fl4.flowi4_iif = oif; 205 fl4.flowi4_iif = oif;
205 fl4.flowi4_mark = mark;
206 fl4.daddr = src; 206 fl4.daddr = src;
207 fl4.saddr = dst; 207 fl4.saddr = dst;
208 fl4.flowi4_tos = tos; 208 fl4.flowi4_tos = tos;
@@ -212,10 +212,12 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
212 in_dev = __in_dev_get_rcu(dev); 212 in_dev = __in_dev_get_rcu(dev);
213 if (in_dev) { 213 if (in_dev) {
214 no_addr = in_dev->ifa_list == NULL; 214 no_addr = in_dev->ifa_list == NULL;
215 rpf = IN_DEV_RPFILTER(in_dev); 215
216 /* Ignore rp_filter for packets protected by IPsec. */
217 rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
218
216 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 219 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
217 if (mark && !IN_DEV_SRC_VMARK(in_dev)) 220 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
218 fl4.flowi4_mark = 0;
219 } 221 }
220 222
221 if (in_dev == NULL) 223 if (in_dev == NULL)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5fe9b8b41df3..6375c1c5f642 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -126,7 +126,7 @@ struct tnode {
126 struct work_struct work; 126 struct work_struct work;
127 struct tnode *tnode_free; 127 struct tnode *tnode_free;
128 }; 128 };
129 struct rt_trie_node *child[0]; 129 struct rt_trie_node __rcu *child[0];
130}; 130};
131 131
132#ifdef CONFIG_IP_FIB_TRIE_STATS 132#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,7 +151,7 @@ struct trie_stat {
151}; 151};
152 152
153struct trie { 153struct trie {
154 struct rt_trie_node *trie; 154 struct rt_trie_node __rcu *trie;
155#ifdef CONFIG_IP_FIB_TRIE_STATS 155#ifdef CONFIG_IP_FIB_TRIE_STATS
156 struct trie_use_stats stats; 156 struct trie_use_stats stats;
157#endif 157#endif
@@ -177,16 +177,29 @@ static const int sync_pages = 128;
177static struct kmem_cache *fn_alias_kmem __read_mostly; 177static struct kmem_cache *fn_alias_kmem __read_mostly;
178static struct kmem_cache *trie_leaf_kmem __read_mostly; 178static struct kmem_cache *trie_leaf_kmem __read_mostly;
179 179
180static inline struct tnode *node_parent(struct rt_trie_node *node) 180/*
181 * caller must hold RTNL
182 */
183static inline struct tnode *node_parent(const struct rt_trie_node *node)
181{ 184{
182 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK); 185 unsigned long parent;
186
187 parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
188
189 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
183} 190}
184 191
185static inline struct tnode *node_parent_rcu(struct rt_trie_node *node) 192/*
193 * caller must hold RCU read lock or RTNL
194 */
195static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
186{ 196{
187 struct tnode *ret = node_parent(node); 197 unsigned long parent;
198
199 parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
200 lockdep_rtnl_is_held());
188 201
189 return rcu_dereference_rtnl(ret); 202 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
190} 203}
191 204
192/* Same as rcu_assign_pointer 205/* Same as rcu_assign_pointer
@@ -198,18 +211,24 @@ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
198 node->parent = (unsigned long)ptr | NODE_TYPE(node); 211 node->parent = (unsigned long)ptr | NODE_TYPE(node);
199} 212}
200 213
201static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i) 214/*
215 * caller must hold RTNL
216 */
217static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
202{ 218{
203 BUG_ON(i >= 1U << tn->bits); 219 BUG_ON(i >= 1U << tn->bits);
204 220
205 return tn->child[i]; 221 return rtnl_dereference(tn->child[i]);
206} 222}
207 223
208static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) 224/*
225 * caller must hold RCU read lock or RTNL
226 */
227static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
209{ 228{
210 struct rt_trie_node *ret = tnode_get_child(tn, i); 229 BUG_ON(i >= 1U << tn->bits);
211 230
212 return rcu_dereference_rtnl(ret); 231 return rcu_dereference_rtnl(tn->child[i]);
213} 232}
214 233
215static inline int tnode_child_length(const struct tnode *tn) 234static inline int tnode_child_length(const struct tnode *tn)
@@ -487,7 +506,7 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
487static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n, 506static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
488 int wasfull) 507 int wasfull)
489{ 508{
490 struct rt_trie_node *chi = tn->child[i]; 509 struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
491 int isfull; 510 int isfull;
492 511
493 BUG_ON(i >= 1<<tn->bits); 512 BUG_ON(i >= 1<<tn->bits);
@@ -665,7 +684,7 @@ one_child:
665 for (i = 0; i < tnode_child_length(tn); i++) { 684 for (i = 0; i < tnode_child_length(tn); i++) {
666 struct rt_trie_node *n; 685 struct rt_trie_node *n;
667 686
668 n = tn->child[i]; 687 n = rtnl_dereference(tn->child[i]);
669 if (!n) 688 if (!n)
670 continue; 689 continue;
671 690
@@ -679,6 +698,20 @@ one_child:
679 return (struct rt_trie_node *) tn; 698 return (struct rt_trie_node *) tn;
680} 699}
681 700
701
702static void tnode_clean_free(struct tnode *tn)
703{
704 int i;
705 struct tnode *tofree;
706
707 for (i = 0; i < tnode_child_length(tn); i++) {
708 tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
709 if (tofree)
710 tnode_free(tofree);
711 }
712 tnode_free(tn);
713}
714
682static struct tnode *inflate(struct trie *t, struct tnode *tn) 715static struct tnode *inflate(struct trie *t, struct tnode *tn)
683{ 716{
684 struct tnode *oldtnode = tn; 717 struct tnode *oldtnode = tn;
@@ -755,8 +788,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
755 inode = (struct tnode *) node; 788 inode = (struct tnode *) node;
756 789
757 if (inode->bits == 1) { 790 if (inode->bits == 1) {
758 put_child(t, tn, 2*i, inode->child[0]); 791 put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
759 put_child(t, tn, 2*i+1, inode->child[1]); 792 put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
760 793
761 tnode_free_safe(inode); 794 tnode_free_safe(inode);
762 continue; 795 continue;
@@ -797,8 +830,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
797 830
798 size = tnode_child_length(left); 831 size = tnode_child_length(left);
799 for (j = 0; j < size; j++) { 832 for (j = 0; j < size; j++) {
800 put_child(t, left, j, inode->child[j]); 833 put_child(t, left, j, rtnl_dereference(inode->child[j]));
801 put_child(t, right, j, inode->child[j + size]); 834 put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
802 } 835 }
803 put_child(t, tn, 2*i, resize(t, left)); 836 put_child(t, tn, 2*i, resize(t, left));
804 put_child(t, tn, 2*i+1, resize(t, right)); 837 put_child(t, tn, 2*i+1, resize(t, right));
@@ -808,18 +841,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
808 tnode_free_safe(oldtnode); 841 tnode_free_safe(oldtnode);
809 return tn; 842 return tn;
810nomem: 843nomem:
811 { 844 tnode_clean_free(tn);
812 int size = tnode_child_length(tn); 845 return ERR_PTR(-ENOMEM);
813 int j;
814
815 for (j = 0; j < size; j++)
816 if (tn->child[j])
817 tnode_free((struct tnode *)tn->child[j]);
818
819 tnode_free(tn);
820
821 return ERR_PTR(-ENOMEM);
822 }
823} 846}
824 847
825static struct tnode *halve(struct trie *t, struct tnode *tn) 848static struct tnode *halve(struct trie *t, struct tnode *tn)
@@ -890,18 +913,8 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
890 tnode_free_safe(oldtnode); 913 tnode_free_safe(oldtnode);
891 return tn; 914 return tn;
892nomem: 915nomem:
893 { 916 tnode_clean_free(tn);
894 int size = tnode_child_length(tn); 917 return ERR_PTR(-ENOMEM);
895 int j;
896
897 for (j = 0; j < size; j++)
898 if (tn->child[j])
899 tnode_free((struct tnode *)tn->child[j]);
900
901 tnode_free(tn);
902
903 return ERR_PTR(-ENOMEM);
904 }
905} 918}
906 919
907/* readside must use rcu_read_lock currently dump routines 920/* readside must use rcu_read_lock currently dump routines
@@ -1033,7 +1046,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1033 t_key cindex; 1046 t_key cindex;
1034 1047
1035 pos = 0; 1048 pos = 0;
1036 n = t->trie; 1049 n = rtnl_dereference(t->trie);
1037 1050
1038 /* If we point to NULL, stop. Either the tree is empty and we should 1051 /* If we point to NULL, stop. Either the tree is empty and we should
1039 * just put a new leaf in if, or we have reached an empty child slot, 1052 * just put a new leaf in if, or we have reached an empty child slot,
@@ -1319,6 +1332,9 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1319 } 1332 }
1320 } 1333 }
1321 1334
1335 if (!plen)
1336 tb->tb_num_default++;
1337
1322 list_add_tail_rcu(&new_fa->fa_list, 1338 list_add_tail_rcu(&new_fa->fa_list,
1323 (fa ? &fa->fa_list : fa_head)); 1339 (fa ? &fa->fa_list : fa_head));
1324 1340
@@ -1684,6 +1700,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1684 1700
1685 list_del_rcu(&fa->fa_list); 1701 list_del_rcu(&fa->fa_list);
1686 1702
1703 if (!plen)
1704 tb->tb_num_default--;
1705
1687 if (list_empty(fa_head)) { 1706 if (list_empty(fa_head)) {
1688 hlist_del_rcu(&li->hlist); 1707 hlist_del_rcu(&li->hlist);
1689 free_leaf_info(li); 1708 free_leaf_info(li);
@@ -1756,7 +1775,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
1756 continue; 1775 continue;
1757 1776
1758 if (IS_LEAF(c)) { 1777 if (IS_LEAF(c)) {
1759 prefetch(p->child[idx]); 1778 prefetch(rcu_dereference_rtnl(p->child[idx]));
1760 return (struct leaf *) c; 1779 return (struct leaf *) c;
1761 } 1780 }
1762 1781
@@ -1974,6 +1993,7 @@ struct fib_table *fib_trie_table(u32 id)
1974 1993
1975 tb->tb_id = id; 1994 tb->tb_id = id;
1976 tb->tb_default = -1; 1995 tb->tb_default = -1;
1996 tb->tb_num_default = 0;
1977 1997
1978 t = (struct trie *) tb->tb_data; 1998 t = (struct trie *) tb->tb_data;
1979 memset(t, 0, sizeof(*t)); 1999 memset(t, 0, sizeof(*t));
@@ -2269,7 +2289,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2269 2289
2270 /* walk rest of this hash chain */ 2290 /* walk rest of this hash chain */
2271 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); 2291 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2272 while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) { 2292 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
2273 tb = hlist_entry(tb_node, struct fib_table, tb_hlist); 2293 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2274 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); 2294 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2275 if (n) 2295 if (n)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5f8a71d3a2a..cfeca3c2152d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -108,8 +108,7 @@ struct icmp_bxm {
108 __be32 times[3]; 108 __be32 times[3];
109 } data; 109 } data;
110 int head_len; 110 int head_len;
111 struct ip_options replyopts; 111 struct ip_options_data replyopts;
112 unsigned char optbuf[40];
113}; 112};
114 113
115/* An array of errno for error messages from dest unreach. */ 114/* An array of errno for error messages from dest unreach. */
@@ -333,7 +332,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
333 struct inet_sock *inet; 332 struct inet_sock *inet;
334 __be32 daddr; 333 __be32 daddr;
335 334
336 if (ip_options_echo(&icmp_param->replyopts, skb)) 335 if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
337 return; 336 return;
338 337
339 sk = icmp_xmit_lock(net); 338 sk = icmp_xmit_lock(net);
@@ -347,10 +346,10 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
347 daddr = ipc.addr = rt->rt_src; 346 daddr = ipc.addr = rt->rt_src;
348 ipc.opt = NULL; 347 ipc.opt = NULL;
349 ipc.tx_flags = 0; 348 ipc.tx_flags = 0;
350 if (icmp_param->replyopts.optlen) { 349 if (icmp_param->replyopts.opt.opt.optlen) {
351 ipc.opt = &icmp_param->replyopts; 350 ipc.opt = &icmp_param->replyopts.opt;
352 if (ipc.opt->srr) 351 if (ipc.opt->opt.srr)
353 daddr = icmp_param->replyopts.faddr; 352 daddr = icmp_param->replyopts.opt.opt.faddr;
354 } 353 }
355 { 354 {
356 struct flowi4 fl4 = { 355 struct flowi4 fl4 = {
@@ -373,14 +372,14 @@ out_unlock:
373} 372}
374 373
375static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in, 374static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
376 struct iphdr *iph, 375 const struct iphdr *iph,
377 __be32 saddr, u8 tos, 376 __be32 saddr, u8 tos,
378 int type, int code, 377 int type, int code,
379 struct icmp_bxm *param) 378 struct icmp_bxm *param)
380{ 379{
381 struct flowi4 fl4 = { 380 struct flowi4 fl4 = {
382 .daddr = (param->replyopts.srr ? 381 .daddr = (param->replyopts.opt.opt.srr ?
383 param->replyopts.faddr : iph->saddr), 382 param->replyopts.opt.opt.faddr : iph->saddr),
384 .saddr = saddr, 383 .saddr = saddr,
385 .flowi4_tos = RT_TOS(tos), 384 .flowi4_tos = RT_TOS(tos),
386 .flowi4_proto = IPPROTO_ICMP, 385 .flowi4_proto = IPPROTO_ICMP,
@@ -581,7 +580,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
581 IPTOS_PREC_INTERNETCONTROL) : 580 IPTOS_PREC_INTERNETCONTROL) :
582 iph->tos; 581 iph->tos;
583 582
584 if (ip_options_echo(&icmp_param.replyopts, skb_in)) 583 if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
585 goto out_unlock; 584 goto out_unlock;
586 585
587 586
@@ -597,7 +596,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
597 icmp_param.offset = skb_network_offset(skb_in); 596 icmp_param.offset = skb_network_offset(skb_in);
598 inet_sk(sk)->tos = tos; 597 inet_sk(sk)->tos = tos;
599 ipc.addr = iph->saddr; 598 ipc.addr = iph->saddr;
600 ipc.opt = &icmp_param.replyopts; 599 ipc.opt = &icmp_param.replyopts.opt;
601 ipc.tx_flags = 0; 600 ipc.tx_flags = 0;
602 601
603 rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, 602 rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
@@ -613,7 +612,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
613 room = dst_mtu(&rt->dst); 612 room = dst_mtu(&rt->dst);
614 if (room > 576) 613 if (room > 576)
615 room = 576; 614 room = 576;
616 room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; 615 room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
617 room -= sizeof(struct icmphdr); 616 room -= sizeof(struct icmphdr);
618 617
619 icmp_param.data_len = skb_in->len - icmp_param.offset; 618 icmp_param.data_len = skb_in->len - icmp_param.offset;
@@ -637,7 +636,7 @@ EXPORT_SYMBOL(icmp_send);
637 636
638static void icmp_unreach(struct sk_buff *skb) 637static void icmp_unreach(struct sk_buff *skb)
639{ 638{
640 struct iphdr *iph; 639 const struct iphdr *iph;
641 struct icmphdr *icmph; 640 struct icmphdr *icmph;
642 int hash, protocol; 641 int hash, protocol;
643 const struct net_protocol *ipprot; 642 const struct net_protocol *ipprot;
@@ -656,7 +655,7 @@ static void icmp_unreach(struct sk_buff *skb)
656 goto out_err; 655 goto out_err;
657 656
658 icmph = icmp_hdr(skb); 657 icmph = icmp_hdr(skb);
659 iph = (struct iphdr *)skb->data; 658 iph = (const struct iphdr *)skb->data;
660 659
661 if (iph->ihl < 5) /* Mangled header, drop. */ 660 if (iph->ihl < 5) /* Mangled header, drop. */
662 goto out_err; 661 goto out_err;
@@ -729,7 +728,7 @@ static void icmp_unreach(struct sk_buff *skb)
729 if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) 728 if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
730 goto out; 729 goto out;
731 730
732 iph = (struct iphdr *)skb->data; 731 iph = (const struct iphdr *)skb->data;
733 protocol = iph->protocol; 732 protocol = iph->protocol;
734 733
735 /* 734 /*
@@ -758,7 +757,7 @@ out_err:
758 757
759static void icmp_redirect(struct sk_buff *skb) 758static void icmp_redirect(struct sk_buff *skb)
760{ 759{
761 struct iphdr *iph; 760 const struct iphdr *iph;
762 761
763 if (skb->len < sizeof(struct iphdr)) 762 if (skb->len < sizeof(struct iphdr))
764 goto out_err; 763 goto out_err;
@@ -769,7 +768,7 @@ static void icmp_redirect(struct sk_buff *skb)
769 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 768 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
770 goto out; 769 goto out;
771 770
772 iph = (struct iphdr *)skb->data; 771 iph = (const struct iphdr *)skb->data;
773 772
774 switch (icmp_hdr(skb)->code & 7) { 773 switch (icmp_hdr(skb)->code & 7) {
775 case ICMP_REDIR_NET: 774 case ICMP_REDIR_NET:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fd3d9ce8398..ec03c2fda6ce 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -309,6 +309,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
309 struct iphdr *pip; 309 struct iphdr *pip;
310 struct igmpv3_report *pig; 310 struct igmpv3_report *pig;
311 struct net *net = dev_net(dev); 311 struct net *net = dev_net(dev);
312 struct flowi4 fl4;
312 313
313 while (1) { 314 while (1) {
314 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), 315 skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
@@ -321,18 +322,13 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
321 } 322 }
322 igmp_skb_size(skb) = size; 323 igmp_skb_size(skb) = size;
323 324
324 rt = ip_route_output_ports(net, NULL, IGMPV3_ALL_MCR, 0, 325 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
325 0, 0, 326 0, 0,
326 IPPROTO_IGMP, 0, dev->ifindex); 327 IPPROTO_IGMP, 0, dev->ifindex);
327 if (IS_ERR(rt)) { 328 if (IS_ERR(rt)) {
328 kfree_skb(skb); 329 kfree_skb(skb);
329 return NULL; 330 return NULL;
330 } 331 }
331 if (rt->rt_src == 0) {
332 kfree_skb(skb);
333 ip_rt_put(rt);
334 return NULL;
335 }
336 332
337 skb_dst_set(skb, &rt->dst); 333 skb_dst_set(skb, &rt->dst);
338 skb->dev = dev; 334 skb->dev = dev;
@@ -348,8 +344,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
348 pip->tos = 0xc0; 344 pip->tos = 0xc0;
349 pip->frag_off = htons(IP_DF); 345 pip->frag_off = htons(IP_DF);
350 pip->ttl = 1; 346 pip->ttl = 1;
351 pip->daddr = rt->rt_dst; 347 pip->daddr = fl4.daddr;
352 pip->saddr = rt->rt_src; 348 pip->saddr = fl4.saddr;
353 pip->protocol = IPPROTO_IGMP; 349 pip->protocol = IPPROTO_IGMP;
354 pip->tot_len = 0; /* filled in later */ 350 pip->tot_len = 0; /* filled in later */
355 ip_select_ident(pip, &rt->dst, NULL); 351 ip_select_ident(pip, &rt->dst, NULL);
@@ -655,6 +651,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
655 struct net_device *dev = in_dev->dev; 651 struct net_device *dev = in_dev->dev;
656 struct net *net = dev_net(dev); 652 struct net *net = dev_net(dev);
657 __be32 group = pmc ? pmc->multiaddr : 0; 653 __be32 group = pmc ? pmc->multiaddr : 0;
654 struct flowi4 fl4;
658 __be32 dst; 655 __be32 dst;
659 656
660 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) 657 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
@@ -664,17 +661,12 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
664 else 661 else
665 dst = group; 662 dst = group;
666 663
667 rt = ip_route_output_ports(net, NULL, dst, 0, 664 rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
668 0, 0, 665 0, 0,
669 IPPROTO_IGMP, 0, dev->ifindex); 666 IPPROTO_IGMP, 0, dev->ifindex);
670 if (IS_ERR(rt)) 667 if (IS_ERR(rt))
671 return -1; 668 return -1;
672 669
673 if (rt->rt_src == 0) {
674 ip_rt_put(rt);
675 return -1;
676 }
677
678 skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 670 skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
679 if (skb == NULL) { 671 if (skb == NULL) {
680 ip_rt_put(rt); 672 ip_rt_put(rt);
@@ -695,7 +687,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
695 iph->frag_off = htons(IP_DF); 687 iph->frag_off = htons(IP_DF);
696 iph->ttl = 1; 688 iph->ttl = 1;
697 iph->daddr = dst; 689 iph->daddr = dst;
698 iph->saddr = rt->rt_src; 690 iph->saddr = fl4.saddr;
699 iph->protocol = IPPROTO_IGMP; 691 iph->protocol = IPPROTO_IGMP;
700 ip_select_ident(iph, &rt->dst, NULL); 692 ip_select_ident(iph, &rt->dst, NULL);
701 ((u8*)&iph[1])[0] = IPOPT_RA; 693 ((u8*)&iph[1])[0] = IPOPT_RA;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 38f23e721b80..54944da2f794 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -354,26 +354,20 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
354{ 354{
355 struct rtable *rt; 355 struct rtable *rt;
356 const struct inet_request_sock *ireq = inet_rsk(req); 356 const struct inet_request_sock *ireq = inet_rsk(req);
357 struct ip_options *opt = inet_rsk(req)->opt; 357 struct ip_options_rcu *opt = inet_rsk(req)->opt;
358 struct flowi4 fl4 = {
359 .flowi4_oif = sk->sk_bound_dev_if,
360 .flowi4_mark = sk->sk_mark,
361 .daddr = ((opt && opt->srr) ?
362 opt->faddr : ireq->rmt_addr),
363 .saddr = ireq->loc_addr,
364 .flowi4_tos = RT_CONN_FLAGS(sk),
365 .flowi4_proto = sk->sk_protocol,
366 .flowi4_flags = inet_sk_flowi_flags(sk),
367 .fl4_sport = inet_sk(sk)->inet_sport,
368 .fl4_dport = ireq->rmt_port,
369 };
370 struct net *net = sock_net(sk); 358 struct net *net = sock_net(sk);
359 struct flowi4 fl4;
371 360
361 flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
362 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
363 sk->sk_protocol, inet_sk_flowi_flags(sk),
364 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
365 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
372 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 366 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
373 rt = ip_route_output_flow(net, &fl4, sk); 367 rt = ip_route_output_flow(net, &fl4, sk);
374 if (IS_ERR(rt)) 368 if (IS_ERR(rt))
375 goto no_route; 369 goto no_route;
376 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 370 if (opt && opt->opt.is_strictroute && fl4.daddr != rt->rt_gateway)
377 goto route_err; 371 goto route_err;
378 return &rt->dst; 372 return &rt->dst;
379 373
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..6ffe94ca5bc9 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -124,7 +124,7 @@ static int inet_csk_diag_fill(struct sock *sk,
124 124
125#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 125#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
126 if (r->idiag_family == AF_INET6) { 126 if (r->idiag_family == AF_INET6) {
127 struct ipv6_pinfo *np = inet6_sk(sk); 127 const struct ipv6_pinfo *np = inet6_sk(sk);
128 128
129 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 129 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
130 &np->rcv_saddr); 130 &np->rcv_saddr);
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 47038cb6c138..85a0f75dae64 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -51,8 +51,8 @@ MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");
51 * Basic tcp checks whether packet is suitable for LRO 51 * Basic tcp checks whether packet is suitable for LRO
52 */ 52 */
53 53
54static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph, 54static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
55 int len, struct net_lro_desc *lro_desc) 55 int len, const struct net_lro_desc *lro_desc)
56{ 56{
57 /* check ip header: don't aggregate padded frames */ 57 /* check ip header: don't aggregate padded frames */
58 if (ntohs(iph->tot_len) != len) 58 if (ntohs(iph->tot_len) != len)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index da5941f18c3c..8871067560db 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -413,11 +413,6 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
413 413
414 dev_net_set(dev, net); 414 dev_net_set(dev, net);
415 415
416 if (strchr(name, '%')) {
417 if (dev_alloc_name(dev, name) < 0)
418 goto failed_free;
419 }
420
421 nt = netdev_priv(dev); 416 nt = netdev_priv(dev);
422 nt->parms = *parms; 417 nt->parms = *parms;
423 dev->rtnl_link_ops = &ipgre_link_ops; 418 dev->rtnl_link_ops = &ipgre_link_ops;
@@ -462,7 +457,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
462 by themself??? 457 by themself???
463 */ 458 */
464 459
465 struct iphdr *iph = (struct iphdr *)skb->data; 460 const struct iphdr *iph = (const struct iphdr *)skb->data;
466 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 461 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
467 int grehlen = (iph->ihl<<2) + 4; 462 int grehlen = (iph->ihl<<2) + 4;
468 const int type = icmp_hdr(skb)->type; 463 const int type = icmp_hdr(skb)->type;
@@ -534,7 +529,7 @@ out:
534 rcu_read_unlock(); 529 rcu_read_unlock();
535} 530}
536 531
537static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 532static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
538{ 533{
539 if (INET_ECN_is_ce(iph->tos)) { 534 if (INET_ECN_is_ce(iph->tos)) {
540 if (skb->protocol == htons(ETH_P_IP)) { 535 if (skb->protocol == htons(ETH_P_IP)) {
@@ -546,19 +541,19 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
546} 541}
547 542
548static inline u8 543static inline u8
549ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb) 544ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
550{ 545{
551 u8 inner = 0; 546 u8 inner = 0;
552 if (skb->protocol == htons(ETH_P_IP)) 547 if (skb->protocol == htons(ETH_P_IP))
553 inner = old_iph->tos; 548 inner = old_iph->tos;
554 else if (skb->protocol == htons(ETH_P_IPV6)) 549 else if (skb->protocol == htons(ETH_P_IPV6))
555 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph); 550 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
556 return INET_ECN_encapsulate(tos, inner); 551 return INET_ECN_encapsulate(tos, inner);
557} 552}
558 553
559static int ipgre_rcv(struct sk_buff *skb) 554static int ipgre_rcv(struct sk_buff *skb)
560{ 555{
561 struct iphdr *iph; 556 const struct iphdr *iph;
562 u8 *h; 557 u8 *h;
563 __be16 flags; 558 __be16 flags;
564 __sum16 csum = 0; 559 __sum16 csum = 0;
@@ -697,8 +692,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
697{ 692{
698 struct ip_tunnel *tunnel = netdev_priv(dev); 693 struct ip_tunnel *tunnel = netdev_priv(dev);
699 struct pcpu_tstats *tstats; 694 struct pcpu_tstats *tstats;
700 struct iphdr *old_iph = ip_hdr(skb); 695 const struct iphdr *old_iph = ip_hdr(skb);
701 struct iphdr *tiph; 696 const struct iphdr *tiph;
697 struct flowi4 fl4;
702 u8 tos; 698 u8 tos;
703 __be16 df; 699 __be16 df;
704 struct rtable *rt; /* Route to the other host */ 700 struct rtable *rt; /* Route to the other host */
@@ -714,7 +710,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
714 710
715 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 711 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
716 gre_hlen = 0; 712 gre_hlen = 0;
717 tiph = (struct iphdr *)skb->data; 713 tiph = (const struct iphdr *)skb->data;
718 } else { 714 } else {
719 gre_hlen = tunnel->hlen; 715 gre_hlen = tunnel->hlen;
720 tiph = &tunnel->parms.iph; 716 tiph = &tunnel->parms.iph;
@@ -735,14 +731,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
735 } 731 }
736#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 732#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
737 else if (skb->protocol == htons(ETH_P_IPV6)) { 733 else if (skb->protocol == htons(ETH_P_IPV6)) {
738 struct in6_addr *addr6; 734 const struct in6_addr *addr6;
739 int addr_type; 735 int addr_type;
740 struct neighbour *neigh = skb_dst(skb)->neighbour; 736 struct neighbour *neigh = skb_dst(skb)->neighbour;
741 737
742 if (neigh == NULL) 738 if (neigh == NULL)
743 goto tx_error; 739 goto tx_error;
744 740
745 addr6 = (struct in6_addr *)&neigh->primary_key; 741 addr6 = (const struct in6_addr *)&neigh->primary_key;
746 addr_type = ipv6_addr_type(addr6); 742 addr_type = ipv6_addr_type(addr6);
747 743
748 if (addr_type == IPV6_ADDR_ANY) { 744 if (addr_type == IPV6_ADDR_ANY) {
@@ -766,10 +762,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
766 if (skb->protocol == htons(ETH_P_IP)) 762 if (skb->protocol == htons(ETH_P_IP))
767 tos = old_iph->tos; 763 tos = old_iph->tos;
768 else if (skb->protocol == htons(ETH_P_IPV6)) 764 else if (skb->protocol == htons(ETH_P_IPV6))
769 tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph); 765 tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
770 } 766 }
771 767
772 rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr, 768 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
773 tunnel->parms.o_key, RT_TOS(tos), 769 tunnel->parms.o_key, RT_TOS(tos),
774 tunnel->parms.link); 770 tunnel->parms.link);
775 if (IS_ERR(rt)) { 771 if (IS_ERR(rt)) {
@@ -873,15 +869,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
873 iph->frag_off = df; 869 iph->frag_off = df;
874 iph->protocol = IPPROTO_GRE; 870 iph->protocol = IPPROTO_GRE;
875 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb); 871 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
876 iph->daddr = rt->rt_dst; 872 iph->daddr = fl4.daddr;
877 iph->saddr = rt->rt_src; 873 iph->saddr = fl4.saddr;
878 874
879 if ((iph->ttl = tiph->ttl) == 0) { 875 if ((iph->ttl = tiph->ttl) == 0) {
880 if (skb->protocol == htons(ETH_P_IP)) 876 if (skb->protocol == htons(ETH_P_IP))
881 iph->ttl = old_iph->ttl; 877 iph->ttl = old_iph->ttl;
882#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 878#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
883 else if (skb->protocol == htons(ETH_P_IPV6)) 879 else if (skb->protocol == htons(ETH_P_IPV6))
884 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 880 iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
885#endif 881#endif
886 else 882 else
887 iph->ttl = ip4_dst_hoplimit(&rt->dst); 883 iph->ttl = ip4_dst_hoplimit(&rt->dst);
@@ -927,7 +923,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
927{ 923{
928 struct net_device *tdev = NULL; 924 struct net_device *tdev = NULL;
929 struct ip_tunnel *tunnel; 925 struct ip_tunnel *tunnel;
930 struct iphdr *iph; 926 const struct iphdr *iph;
931 int hlen = LL_MAX_HEADER; 927 int hlen = LL_MAX_HEADER;
932 int mtu = ETH_DATA_LEN; 928 int mtu = ETH_DATA_LEN;
933 int addend = sizeof(struct iphdr) + 4; 929 int addend = sizeof(struct iphdr) + 4;
@@ -938,12 +934,14 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
938 /* Guess output device to choose reasonable mtu and needed_headroom */ 934 /* Guess output device to choose reasonable mtu and needed_headroom */
939 935
940 if (iph->daddr) { 936 if (iph->daddr) {
941 struct rtable *rt = ip_route_output_gre(dev_net(dev), 937 struct flowi4 fl4;
942 iph->daddr, iph->saddr, 938 struct rtable *rt;
943 tunnel->parms.o_key, 939
944 RT_TOS(iph->tos), 940 rt = ip_route_output_gre(dev_net(dev), &fl4,
945 tunnel->parms.link); 941 iph->daddr, iph->saddr,
946 942 tunnel->parms.o_key,
943 RT_TOS(iph->tos),
944 tunnel->parms.link);
947 if (!IS_ERR(rt)) { 945 if (!IS_ERR(rt)) {
948 tdev = rt->dst.dev; 946 tdev = rt->dst.dev;
949 ip_rt_put(rt); 947 ip_rt_put(rt);
@@ -1180,7 +1178,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1180 1178
1181static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 1179static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1182{ 1180{
1183 struct iphdr *iph = (struct iphdr *) skb_mac_header(skb); 1181 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1184 memcpy(haddr, &iph->saddr, 4); 1182 memcpy(haddr, &iph->saddr, 4);
1185 return 4; 1183 return 4;
1186} 1184}
@@ -1196,13 +1194,15 @@ static int ipgre_open(struct net_device *dev)
1196 struct ip_tunnel *t = netdev_priv(dev); 1194 struct ip_tunnel *t = netdev_priv(dev);
1197 1195
1198 if (ipv4_is_multicast(t->parms.iph.daddr)) { 1196 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1199 struct rtable *rt = ip_route_output_gre(dev_net(dev), 1197 struct flowi4 fl4;
1200 t->parms.iph.daddr, 1198 struct rtable *rt;
1201 t->parms.iph.saddr, 1199
1202 t->parms.o_key, 1200 rt = ip_route_output_gre(dev_net(dev), &fl4,
1203 RT_TOS(t->parms.iph.tos), 1201 t->parms.iph.daddr,
1204 t->parms.link); 1202 t->parms.iph.saddr,
1205 1203 t->parms.o_key,
1204 RT_TOS(t->parms.iph.tos),
1205 t->parms.link);
1206 if (IS_ERR(rt)) 1206 if (IS_ERR(rt))
1207 return -EADDRNOTAVAIL; 1207 return -EADDRNOTAVAIL;
1208 dev = rt->dst.dev; 1208 dev = rt->dst.dev;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d7b2b0987a3b..c8f48efc5fd3 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -268,7 +268,7 @@ int ip_local_deliver(struct sk_buff *skb)
268static inline int ip_rcv_options(struct sk_buff *skb) 268static inline int ip_rcv_options(struct sk_buff *skb)
269{ 269{
270 struct ip_options *opt; 270 struct ip_options *opt;
271 struct iphdr *iph; 271 const struct iphdr *iph;
272 struct net_device *dev = skb->dev; 272 struct net_device *dev = skb->dev;
273 273
274 /* It looks as overkill, because not all 274 /* It looks as overkill, because not all
@@ -374,7 +374,7 @@ drop:
374 */ 374 */
375int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 375int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
376{ 376{
377 struct iphdr *iph; 377 const struct iphdr *iph;
378 u32 len; 378 u32 len;
379 379
380 /* When the interface is in promisc. mode, drop all the crap 380 /* When the interface is in promisc. mode, drop all the crap
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 2391b24e8251..01fc40965848 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -36,7 +36,7 @@
36 * saddr is address of outgoing interface. 36 * saddr is address of outgoing interface.
37 */ 37 */
38 38
39void ip_options_build(struct sk_buff * skb, struct ip_options * opt, 39void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
40 __be32 daddr, struct rtable *rt, int is_frag) 40 __be32 daddr, struct rtable *rt, int is_frag)
41{ 41{
42 unsigned char *iph = skb_network_header(skb); 42 unsigned char *iph = skb_network_header(skb);
@@ -83,9 +83,9 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
83 * NOTE: dopt cannot point to skb. 83 * NOTE: dopt cannot point to skb.
84 */ 84 */
85 85
86int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) 86int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
87{ 87{
88 struct ip_options *sopt; 88 const struct ip_options *sopt;
89 unsigned char *sptr, *dptr; 89 unsigned char *sptr, *dptr;
90 int soffset, doffset; 90 int soffset, doffset;
91 int optlen; 91 int optlen;
@@ -95,10 +95,8 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
95 95
96 sopt = &(IPCB(skb)->opt); 96 sopt = &(IPCB(skb)->opt);
97 97
98 if (sopt->optlen == 0) { 98 if (sopt->optlen == 0)
99 dopt->optlen = 0;
100 return 0; 99 return 0;
101 }
102 100
103 sptr = skb_network_header(skb); 101 sptr = skb_network_header(skb);
104 dptr = dopt->__data; 102 dptr = dopt->__data;
@@ -157,7 +155,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
157 dopt->optlen += optlen; 155 dopt->optlen += optlen;
158 } 156 }
159 if (sopt->srr) { 157 if (sopt->srr) {
160 unsigned char * start = sptr+sopt->srr; 158 unsigned char *start = sptr+sopt->srr;
161 __be32 faddr; 159 __be32 faddr;
162 160
163 optlen = start[1]; 161 optlen = start[1];
@@ -499,19 +497,19 @@ void ip_options_undo(struct ip_options * opt)
499 } 497 }
500} 498}
501 499
502static struct ip_options *ip_options_get_alloc(const int optlen) 500static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
503{ 501{
504 return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3), 502 return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
505 GFP_KERNEL); 503 GFP_KERNEL);
506} 504}
507 505
508static int ip_options_get_finish(struct net *net, struct ip_options **optp, 506static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
509 struct ip_options *opt, int optlen) 507 struct ip_options_rcu *opt, int optlen)
510{ 508{
511 while (optlen & 3) 509 while (optlen & 3)
512 opt->__data[optlen++] = IPOPT_END; 510 opt->opt.__data[optlen++] = IPOPT_END;
513 opt->optlen = optlen; 511 opt->opt.optlen = optlen;
514 if (optlen && ip_options_compile(net, opt, NULL)) { 512 if (optlen && ip_options_compile(net, &opt->opt, NULL)) {
515 kfree(opt); 513 kfree(opt);
516 return -EINVAL; 514 return -EINVAL;
517 } 515 }
@@ -520,29 +518,29 @@ static int ip_options_get_finish(struct net *net, struct ip_options **optp,
520 return 0; 518 return 0;
521} 519}
522 520
523int ip_options_get_from_user(struct net *net, struct ip_options **optp, 521int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
524 unsigned char __user *data, int optlen) 522 unsigned char __user *data, int optlen)
525{ 523{
526 struct ip_options *opt = ip_options_get_alloc(optlen); 524 struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
527 525
528 if (!opt) 526 if (!opt)
529 return -ENOMEM; 527 return -ENOMEM;
530 if (optlen && copy_from_user(opt->__data, data, optlen)) { 528 if (optlen && copy_from_user(opt->opt.__data, data, optlen)) {
531 kfree(opt); 529 kfree(opt);
532 return -EFAULT; 530 return -EFAULT;
533 } 531 }
534 return ip_options_get_finish(net, optp, opt, optlen); 532 return ip_options_get_finish(net, optp, opt, optlen);
535} 533}
536 534
537int ip_options_get(struct net *net, struct ip_options **optp, 535int ip_options_get(struct net *net, struct ip_options_rcu **optp,
538 unsigned char *data, int optlen) 536 unsigned char *data, int optlen)
539{ 537{
540 struct ip_options *opt = ip_options_get_alloc(optlen); 538 struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
541 539
542 if (!opt) 540 if (!opt)
543 return -ENOMEM; 541 return -ENOMEM;
544 if (optlen) 542 if (optlen)
545 memcpy(opt->__data, data, optlen); 543 memcpy(opt->opt.__data, data, optlen);
546 return ip_options_get_finish(net, optp, opt, optlen); 544 return ip_options_get_finish(net, optp, opt, optlen);
547} 545}
548 546
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 459c011b1d4a..db38c1822de8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -140,14 +140,14 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
140 * 140 *
141 */ 141 */
142int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, 142int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
143 __be32 saddr, __be32 daddr, struct ip_options *opt) 143 __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
144{ 144{
145 struct inet_sock *inet = inet_sk(sk); 145 struct inet_sock *inet = inet_sk(sk);
146 struct rtable *rt = skb_rtable(skb); 146 struct rtable *rt = skb_rtable(skb);
147 struct iphdr *iph; 147 struct iphdr *iph;
148 148
149 /* Build the IP header. */ 149 /* Build the IP header. */
150 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); 150 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
151 skb_reset_network_header(skb); 151 skb_reset_network_header(skb);
152 iph = ip_hdr(skb); 152 iph = ip_hdr(skb);
153 iph->version = 4; 153 iph->version = 4;
@@ -158,14 +158,14 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
158 else 158 else
159 iph->frag_off = 0; 159 iph->frag_off = 0;
160 iph->ttl = ip_select_ttl(inet, &rt->dst); 160 iph->ttl = ip_select_ttl(inet, &rt->dst);
161 iph->daddr = rt->rt_dst; 161 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
162 iph->saddr = rt->rt_src; 162 iph->saddr = saddr;
163 iph->protocol = sk->sk_protocol; 163 iph->protocol = sk->sk_protocol;
164 ip_select_ident(iph, &rt->dst, sk); 164 ip_select_ident(iph, &rt->dst, sk);
165 165
166 if (opt && opt->optlen) { 166 if (opt && opt->opt.optlen) {
167 iph->ihl += opt->optlen>>2; 167 iph->ihl += opt->opt.optlen>>2;
168 ip_options_build(skb, opt, daddr, rt, 0); 168 ip_options_build(skb, &opt->opt, daddr, rt, 0);
169 } 169 }
170 170
171 skb->priority = sk->sk_priority; 171 skb->priority = sk->sk_priority;
@@ -316,7 +316,7 @@ int ip_queue_xmit(struct sk_buff *skb)
316{ 316{
317 struct sock *sk = skb->sk; 317 struct sock *sk = skb->sk;
318 struct inet_sock *inet = inet_sk(sk); 318 struct inet_sock *inet = inet_sk(sk);
319 struct ip_options *opt = inet->opt; 319 struct ip_options_rcu *inet_opt;
320 struct rtable *rt; 320 struct rtable *rt;
321 struct iphdr *iph; 321 struct iphdr *iph;
322 int res; 322 int res;
@@ -325,6 +325,7 @@ int ip_queue_xmit(struct sk_buff *skb)
325 * f.e. by something like SCTP. 325 * f.e. by something like SCTP.
326 */ 326 */
327 rcu_read_lock(); 327 rcu_read_lock();
328 inet_opt = rcu_dereference(inet->inet_opt);
328 rt = skb_rtable(skb); 329 rt = skb_rtable(skb);
329 if (rt != NULL) 330 if (rt != NULL)
330 goto packet_routed; 331 goto packet_routed;
@@ -332,18 +333,19 @@ int ip_queue_xmit(struct sk_buff *skb)
332 /* Make sure we can route this packet. */ 333 /* Make sure we can route this packet. */
333 rt = (struct rtable *)__sk_dst_check(sk, 0); 334 rt = (struct rtable *)__sk_dst_check(sk, 0);
334 if (rt == NULL) { 335 if (rt == NULL) {
336 struct flowi4 fl4;
335 __be32 daddr; 337 __be32 daddr;
336 338
337 /* Use correct destination address if we have options. */ 339 /* Use correct destination address if we have options. */
338 daddr = inet->inet_daddr; 340 daddr = inet->inet_daddr;
339 if(opt && opt->srr) 341 if (inet_opt && inet_opt->opt.srr)
340 daddr = opt->faddr; 342 daddr = inet_opt->opt.faddr;
341 343
342 /* If this fails, retransmit mechanism of transport layer will 344 /* If this fails, retransmit mechanism of transport layer will
343 * keep trying until route appears or the connection times 345 * keep trying until route appears or the connection times
344 * itself out. 346 * itself out.
345 */ 347 */
346 rt = ip_route_output_ports(sock_net(sk), sk, 348 rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
347 daddr, inet->inet_saddr, 349 daddr, inet->inet_saddr,
348 inet->inet_dport, 350 inet->inet_dport,
349 inet->inet_sport, 351 inet->inet_sport,
@@ -357,11 +359,11 @@ int ip_queue_xmit(struct sk_buff *skb)
357 skb_dst_set_noref(skb, &rt->dst); 359 skb_dst_set_noref(skb, &rt->dst);
358 360
359packet_routed: 361packet_routed:
360 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 362 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_dst != rt->rt_gateway)
361 goto no_route; 363 goto no_route;
362 364
363 /* OK, we know where to send it, allocate and build IP header. */ 365 /* OK, we know where to send it, allocate and build IP header. */
364 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); 366 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
365 skb_reset_network_header(skb); 367 skb_reset_network_header(skb);
366 iph = ip_hdr(skb); 368 iph = ip_hdr(skb);
367 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); 369 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
@@ -375,9 +377,9 @@ packet_routed:
375 iph->daddr = rt->rt_dst; 377 iph->daddr = rt->rt_dst;
376 /* Transport layer set skb->h.foo itself. */ 378 /* Transport layer set skb->h.foo itself. */
377 379
378 if (opt && opt->optlen) { 380 if (inet_opt && inet_opt->opt.optlen) {
379 iph->ihl += opt->optlen >> 2; 381 iph->ihl += inet_opt->opt.optlen >> 2;
380 ip_options_build(skb, opt, inet->inet_daddr, rt, 0); 382 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
381 } 383 }
382 384
383 ip_select_ident_more(iph, &rt->dst, sk, 385 ip_select_ident_more(iph, &rt->dst, sk,
@@ -1033,7 +1035,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1033 struct ipcm_cookie *ipc, struct rtable **rtp) 1035 struct ipcm_cookie *ipc, struct rtable **rtp)
1034{ 1036{
1035 struct inet_sock *inet = inet_sk(sk); 1037 struct inet_sock *inet = inet_sk(sk);
1036 struct ip_options *opt; 1038 struct ip_options_rcu *opt;
1037 struct rtable *rt; 1039 struct rtable *rt;
1038 1040
1039 /* 1041 /*
@@ -1047,7 +1049,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1047 if (unlikely(cork->opt == NULL)) 1049 if (unlikely(cork->opt == NULL))
1048 return -ENOBUFS; 1050 return -ENOBUFS;
1049 } 1051 }
1050 memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen); 1052 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1051 cork->flags |= IPCORK_OPT; 1053 cork->flags |= IPCORK_OPT;
1052 cork->addr = ipc->addr; 1054 cork->addr = ipc->addr;
1053 } 1055 }
@@ -1451,39 +1453,34 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1451 unsigned int len) 1453 unsigned int len)
1452{ 1454{
1453 struct inet_sock *inet = inet_sk(sk); 1455 struct inet_sock *inet = inet_sk(sk);
1454 struct { 1456 struct ip_options_data replyopts;
1455 struct ip_options opt;
1456 char data[40];
1457 } replyopts;
1458 struct ipcm_cookie ipc; 1457 struct ipcm_cookie ipc;
1459 __be32 daddr; 1458 __be32 daddr;
1460 struct rtable *rt = skb_rtable(skb); 1459 struct rtable *rt = skb_rtable(skb);
1461 1460
1462 if (ip_options_echo(&replyopts.opt, skb)) 1461 if (ip_options_echo(&replyopts.opt.opt, skb))
1463 return; 1462 return;
1464 1463
1465 daddr = ipc.addr = rt->rt_src; 1464 daddr = ipc.addr = rt->rt_src;
1466 ipc.opt = NULL; 1465 ipc.opt = NULL;
1467 ipc.tx_flags = 0; 1466 ipc.tx_flags = 0;
1468 1467
1469 if (replyopts.opt.optlen) { 1468 if (replyopts.opt.opt.optlen) {
1470 ipc.opt = &replyopts.opt; 1469 ipc.opt = &replyopts.opt;
1471 1470
1472 if (ipc.opt->srr) 1471 if (replyopts.opt.opt.srr)
1473 daddr = replyopts.opt.faddr; 1472 daddr = replyopts.opt.opt.faddr;
1474 } 1473 }
1475 1474
1476 { 1475 {
1477 struct flowi4 fl4 = { 1476 struct flowi4 fl4;
1478 .flowi4_oif = arg->bound_dev_if, 1477
1479 .daddr = daddr, 1478 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1480 .saddr = rt->rt_spec_dst, 1479 RT_TOS(ip_hdr(skb)->tos),
1481 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 1480 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1482 .fl4_sport = tcp_hdr(skb)->dest, 1481 ip_reply_arg_flowi_flags(arg),
1483 .fl4_dport = tcp_hdr(skb)->source, 1482 daddr, rt->rt_spec_dst,
1484 .flowi4_proto = sk->sk_protocol, 1483 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1485 .flowi4_flags = ip_reply_arg_flowi_flags(arg),
1486 };
1487 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1484 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1488 rt = ip_route_output_key(sock_net(sk), &fl4); 1485 rt = ip_route_output_key(sock_net(sk), &fl4);
1489 if (IS_ERR(rt)) 1486 if (IS_ERR(rt))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3948c86e59ca..ab0c9efd1efa 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -131,7 +131,7 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
132{ 132{
133 struct sockaddr_in sin; 133 struct sockaddr_in sin;
134 struct iphdr *iph = ip_hdr(skb); 134 const struct iphdr *iph = ip_hdr(skb);
135 __be16 *ports = (__be16 *)skb_transport_header(skb); 135 __be16 *ports = (__be16 *)skb_transport_header(skb);
136 136
137 if (skb_transport_offset(skb) + 4 > skb->len) 137 if (skb_transport_offset(skb) + 4 > skb->len)
@@ -451,6 +451,11 @@ out:
451} 451}
452 452
453 453
454static void opt_kfree_rcu(struct rcu_head *head)
455{
456 kfree(container_of(head, struct ip_options_rcu, rcu));
457}
458
454/* 459/*
455 * Socket option code for IP. This is the end of the line after any 460 * Socket option code for IP. This is the end of the line after any
456 * TCP,UDP etc options on an IP socket. 461 * TCP,UDP etc options on an IP socket.
@@ -497,13 +502,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
497 switch (optname) { 502 switch (optname) {
498 case IP_OPTIONS: 503 case IP_OPTIONS:
499 { 504 {
500 struct ip_options *opt = NULL; 505 struct ip_options_rcu *old, *opt = NULL;
506
501 if (optlen > 40) 507 if (optlen > 40)
502 goto e_inval; 508 goto e_inval;
503 err = ip_options_get_from_user(sock_net(sk), &opt, 509 err = ip_options_get_from_user(sock_net(sk), &opt,
504 optval, optlen); 510 optval, optlen);
505 if (err) 511 if (err)
506 break; 512 break;
513 old = rcu_dereference_protected(inet->inet_opt,
514 sock_owned_by_user(sk));
507 if (inet->is_icsk) { 515 if (inet->is_icsk) {
508 struct inet_connection_sock *icsk = inet_csk(sk); 516 struct inet_connection_sock *icsk = inet_csk(sk);
509#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 517#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -512,17 +520,18 @@ static int do_ip_setsockopt(struct sock *sk, int level,
512 (TCPF_LISTEN | TCPF_CLOSE)) && 520 (TCPF_LISTEN | TCPF_CLOSE)) &&
513 inet->inet_daddr != LOOPBACK4_IPV6)) { 521 inet->inet_daddr != LOOPBACK4_IPV6)) {
514#endif 522#endif
515 if (inet->opt) 523 if (old)
516 icsk->icsk_ext_hdr_len -= inet->opt->optlen; 524 icsk->icsk_ext_hdr_len -= old->opt.optlen;
517 if (opt) 525 if (opt)
518 icsk->icsk_ext_hdr_len += opt->optlen; 526 icsk->icsk_ext_hdr_len += opt->opt.optlen;
519 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); 527 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
520#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 528#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
521 } 529 }
522#endif 530#endif
523 } 531 }
524 opt = xchg(&inet->opt, opt); 532 rcu_assign_pointer(inet->inet_opt, opt);
525 kfree(opt); 533 if (old)
534 call_rcu(&old->rcu, opt_kfree_rcu);
526 break; 535 break;
527 } 536 }
528 case IP_PKTINFO: 537 case IP_PKTINFO:
@@ -1081,12 +1090,16 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1081 case IP_OPTIONS: 1090 case IP_OPTIONS:
1082 { 1091 {
1083 unsigned char optbuf[sizeof(struct ip_options)+40]; 1092 unsigned char optbuf[sizeof(struct ip_options)+40];
1084 struct ip_options * opt = (struct ip_options *)optbuf; 1093 struct ip_options *opt = (struct ip_options *)optbuf;
1094 struct ip_options_rcu *inet_opt;
1095
1096 inet_opt = rcu_dereference_protected(inet->inet_opt,
1097 sock_owned_by_user(sk));
1085 opt->optlen = 0; 1098 opt->optlen = 0;
1086 if (inet->opt) 1099 if (inet_opt)
1087 memcpy(optbuf, inet->opt, 1100 memcpy(optbuf, &inet_opt->opt,
1088 sizeof(struct ip_options)+ 1101 sizeof(struct ip_options) +
1089 inet->opt->optlen); 1102 inet_opt->opt.optlen);
1090 release_sock(sk); 1103 release_sock(sk);
1091 1104
1092 if (opt->optlen == 0) 1105 if (opt->optlen == 0)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 629067571f02..c857f6f49b03 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -27,7 +27,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev); 28 struct net *net = dev_net(skb->dev);
29 __be32 spi; 29 __be32 spi;
30 struct iphdr *iph = (struct iphdr *)skb->data; 30 const struct iphdr *iph = (const struct iphdr *)skb->data;
31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
32 struct xfrm_state *x; 32 struct xfrm_state *x;
33 33
@@ -36,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
36 return; 36 return;
37 37
38 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
40 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
41 if (!x) 41 if (!x)
42 return; 42 return;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index bfc17c5914e7..378b20b7ca6e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -276,11 +276,6 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
276 276
277 dev_net_set(dev, net); 277 dev_net_set(dev, net);
278 278
279 if (strchr(name, '%')) {
280 if (dev_alloc_name(dev, name) < 0)
281 goto failed_free;
282 }
283
284 nt = netdev_priv(dev); 279 nt = netdev_priv(dev);
285 nt->parms = *parms; 280 nt->parms = *parms;
286 281
@@ -319,7 +314,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
319 8 bytes of packet payload. It means, that precise relaying of 314 8 bytes of packet payload. It means, that precise relaying of
320 ICMP in the real Internet is absolutely infeasible. 315 ICMP in the real Internet is absolutely infeasible.
321 */ 316 */
322 struct iphdr *iph = (struct iphdr *)skb->data; 317 const struct iphdr *iph = (const struct iphdr *)skb->data;
323 const int type = icmp_hdr(skb)->type; 318 const int type = icmp_hdr(skb)->type;
324 const int code = icmp_hdr(skb)->code; 319 const int code = icmp_hdr(skb)->code;
325 struct ip_tunnel *t; 320 struct ip_tunnel *t;
@@ -433,15 +428,16 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433{ 428{
434 struct ip_tunnel *tunnel = netdev_priv(dev); 429 struct ip_tunnel *tunnel = netdev_priv(dev);
435 struct pcpu_tstats *tstats; 430 struct pcpu_tstats *tstats;
436 struct iphdr *tiph = &tunnel->parms.iph; 431 const struct iphdr *tiph = &tunnel->parms.iph;
437 u8 tos = tunnel->parms.iph.tos; 432 u8 tos = tunnel->parms.iph.tos;
438 __be16 df = tiph->frag_off; 433 __be16 df = tiph->frag_off;
439 struct rtable *rt; /* Route to the other host */ 434 struct rtable *rt; /* Route to the other host */
440 struct net_device *tdev; /* Device to other host */ 435 struct net_device *tdev; /* Device to other host */
441 struct iphdr *old_iph = ip_hdr(skb); 436 const struct iphdr *old_iph = ip_hdr(skb);
442 struct iphdr *iph; /* Our new IP header */ 437 struct iphdr *iph; /* Our new IP header */
443 unsigned int max_headroom; /* The extra header space needed */ 438 unsigned int max_headroom; /* The extra header space needed */
444 __be32 dst = tiph->daddr; 439 __be32 dst = tiph->daddr;
440 struct flowi4 fl4;
445 int mtu; 441 int mtu;
446 442
447 if (skb->protocol != htons(ETH_P_IP)) 443 if (skb->protocol != htons(ETH_P_IP))
@@ -460,7 +456,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
460 goto tx_error_icmp; 456 goto tx_error_icmp;
461 } 457 }
462 458
463 rt = ip_route_output_ports(dev_net(dev), NULL, 459 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
464 dst, tiph->saddr, 460 dst, tiph->saddr,
465 0, 0, 461 0, 0,
466 IPPROTO_IPIP, RT_TOS(tos), 462 IPPROTO_IPIP, RT_TOS(tos),
@@ -549,8 +545,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
549 iph->frag_off = df; 545 iph->frag_off = df;
550 iph->protocol = IPPROTO_IPIP; 546 iph->protocol = IPPROTO_IPIP;
551 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos); 547 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
552 iph->daddr = rt->rt_dst; 548 iph->daddr = fl4.daddr;
553 iph->saddr = rt->rt_src; 549 iph->saddr = fl4.saddr;
554 550
555 if ((iph->ttl = tiph->ttl) == 0) 551 if ((iph->ttl = tiph->ttl) == 0)
556 iph->ttl = old_iph->ttl; 552 iph->ttl = old_iph->ttl;
@@ -572,19 +568,21 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
572{ 568{
573 struct net_device *tdev = NULL; 569 struct net_device *tdev = NULL;
574 struct ip_tunnel *tunnel; 570 struct ip_tunnel *tunnel;
575 struct iphdr *iph; 571 const struct iphdr *iph;
576 572
577 tunnel = netdev_priv(dev); 573 tunnel = netdev_priv(dev);
578 iph = &tunnel->parms.iph; 574 iph = &tunnel->parms.iph;
579 575
580 if (iph->daddr) { 576 if (iph->daddr) {
581 struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL, 577 struct rtable *rt;
582 iph->daddr, iph->saddr, 578 struct flowi4 fl4;
583 0, 0, 579
584 IPPROTO_IPIP, 580 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
585 RT_TOS(iph->tos), 581 iph->daddr, iph->saddr,
586 tunnel->parms.link); 582 0, 0,
587 583 IPPROTO_IPIP,
584 RT_TOS(iph->tos),
585 tunnel->parms.link);
588 if (!IS_ERR(rt)) { 586 if (!IS_ERR(rt)) {
589 tdev = rt->dst.dev; 587 tdev = rt->dst.dev;
590 ip_rt_put(rt); 588 ip_rt_put(rt);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1f62eaeb6de4..30a7763c400e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1549,7 +1549,7 @@ static struct notifier_block ip_mr_notifier = {
1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1550{ 1550{
1551 struct iphdr *iph; 1551 struct iphdr *iph;
1552 struct iphdr *old_iph = ip_hdr(skb); 1552 const struct iphdr *old_iph = ip_hdr(skb);
1553 1553
1554 skb_push(skb, sizeof(struct iphdr)); 1554 skb_push(skb, sizeof(struct iphdr));
1555 skb->transport_header = skb->network_header; 1555 skb->transport_header = skb->network_header;
@@ -1595,6 +1595,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1595 struct vif_device *vif = &mrt->vif_table[vifi]; 1595 struct vif_device *vif = &mrt->vif_table[vifi];
1596 struct net_device *dev; 1596 struct net_device *dev;
1597 struct rtable *rt; 1597 struct rtable *rt;
1598 struct flowi4 fl4;
1598 int encap = 0; 1599 int encap = 0;
1599 1600
1600 if (vif->dev == NULL) 1601 if (vif->dev == NULL)
@@ -1612,7 +1613,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1612#endif 1613#endif
1613 1614
1614 if (vif->flags & VIFF_TUNNEL) { 1615 if (vif->flags & VIFF_TUNNEL) {
1615 rt = ip_route_output_ports(net, NULL, 1616 rt = ip_route_output_ports(net, &fl4, NULL,
1616 vif->remote, vif->local, 1617 vif->remote, vif->local,
1617 0, 0, 1618 0, 0,
1618 IPPROTO_IPIP, 1619 IPPROTO_IPIP,
@@ -1621,7 +1622,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1621 goto out_free; 1622 goto out_free;
1622 encap = sizeof(struct iphdr); 1623 encap = sizeof(struct iphdr);
1623 } else { 1624 } else {
1624 rt = ip_route_output_ports(net, NULL, iph->daddr, 0, 1625 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1625 0, 0, 1626 0, 0,
1626 IPPROTO_IPIP, 1627 IPPROTO_IPIP,
1627 RT_TOS(iph->tos), vif->link); 1628 RT_TOS(iph->tos), vif->link);
@@ -1788,12 +1789,14 @@ dont_forward:
1788 return 0; 1789 return 0;
1789} 1790}
1790 1791
1791static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct rtable *rt) 1792static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1792{ 1793{
1794 struct rtable *rt = skb_rtable(skb);
1795 struct iphdr *iph = ip_hdr(skb);
1793 struct flowi4 fl4 = { 1796 struct flowi4 fl4 = {
1794 .daddr = rt->rt_key_dst, 1797 .daddr = iph->daddr,
1795 .saddr = rt->rt_key_src, 1798 .saddr = iph->saddr,
1796 .flowi4_tos = rt->rt_tos, 1799 .flowi4_tos = iph->tos,
1797 .flowi4_oif = rt->rt_oif, 1800 .flowi4_oif = rt->rt_oif,
1798 .flowi4_iif = rt->rt_iif, 1801 .flowi4_iif = rt->rt_iif,
1799 .flowi4_mark = rt->rt_mark, 1802 .flowi4_mark = rt->rt_mark,
@@ -1825,7 +1828,7 @@ int ip_mr_input(struct sk_buff *skb)
1825 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1828 if (IPCB(skb)->flags & IPSKB_FORWARDED)
1826 goto dont_forward; 1829 goto dont_forward;
1827 1830
1828 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1831 mrt = ipmr_rt_fib_lookup(net, skb);
1829 if (IS_ERR(mrt)) { 1832 if (IS_ERR(mrt)) {
1830 kfree_skb(skb); 1833 kfree_skb(skb);
1831 return PTR_ERR(mrt); 1834 return PTR_ERR(mrt);
@@ -1957,7 +1960,7 @@ int pim_rcv_v1(struct sk_buff *skb)
1957 1960
1958 pim = igmp_hdr(skb); 1961 pim = igmp_hdr(skb);
1959 1962
1960 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1963 mrt = ipmr_rt_fib_lookup(net, skb);
1961 if (IS_ERR(mrt)) 1964 if (IS_ERR(mrt))
1962 goto drop; 1965 goto drop;
1963 if (!mrt->mroute_do_pim || 1966 if (!mrt->mroute_do_pim ||
@@ -1989,7 +1992,7 @@ static int pim_rcv(struct sk_buff *skb)
1989 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1992 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1990 goto drop; 1993 goto drop;
1991 1994
1992 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1995 mrt = ipmr_rt_fib_lookup(net, skb);
1993 if (IS_ERR(mrt)) 1996 if (IS_ERR(mrt))
1994 goto drop; 1997 goto drop;
1995 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1998 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
@@ -2038,20 +2041,20 @@ rtattr_failure:
2038 return -EMSGSIZE; 2041 return -EMSGSIZE;
2039} 2042}
2040 2043
2041int ipmr_get_route(struct net *net, 2044int ipmr_get_route(struct net *net, struct sk_buff *skb,
2042 struct sk_buff *skb, struct rtmsg *rtm, int nowait) 2045 __be32 saddr, __be32 daddr,
2046 struct rtmsg *rtm, int nowait)
2043{ 2047{
2044 int err;
2045 struct mr_table *mrt;
2046 struct mfc_cache *cache; 2048 struct mfc_cache *cache;
2047 struct rtable *rt = skb_rtable(skb); 2049 struct mr_table *mrt;
2050 int err;
2048 2051
2049 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2052 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2050 if (mrt == NULL) 2053 if (mrt == NULL)
2051 return -ENOENT; 2054 return -ENOENT;
2052 2055
2053 rcu_read_lock(); 2056 rcu_read_lock();
2054 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); 2057 cache = ipmr_cache_find(mrt, saddr, daddr);
2055 2058
2056 if (cache == NULL) { 2059 if (cache == NULL) {
2057 struct sk_buff *skb2; 2060 struct sk_buff *skb2;
@@ -2084,8 +2087,8 @@ int ipmr_get_route(struct net *net,
2084 skb_reset_network_header(skb2); 2087 skb_reset_network_header(skb2);
2085 iph = ip_hdr(skb2); 2088 iph = ip_hdr(skb2);
2086 iph->ihl = sizeof(struct iphdr) >> 2; 2089 iph->ihl = sizeof(struct iphdr) >> 2;
2087 iph->saddr = rt->rt_src; 2090 iph->saddr = saddr;
2088 iph->daddr = rt->rt_dst; 2091 iph->daddr = daddr;
2089 iph->version = 0; 2092 iph->version = 0;
2090 err = ipmr_cache_unresolved(mrt, vif, skb2); 2093 err = ipmr_cache_unresolved(mrt, vif, skb2);
2091 read_unlock(&mrt_lock); 2094 read_unlock(&mrt_lock);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 89bc7e66d598..fd7a3f68917f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
260 void *table_base; 260 void *table_base;
261 const struct xt_table_info *private; 261 const struct xt_table_info *private;
262 struct xt_action_param acpar; 262 struct xt_action_param acpar;
263 unsigned int addend;
263 264
264 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 265 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
265 return NF_DROP; 266 return NF_DROP;
@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
267 indev = in ? in->name : nulldevname; 268 indev = in ? in->name : nulldevname;
268 outdev = out ? out->name : nulldevname; 269 outdev = out ? out->name : nulldevname;
269 270
270 xt_info_rdlock_bh(); 271 local_bh_disable();
272 addend = xt_write_recseq_begin();
271 private = table->private; 273 private = table->private;
272 table_base = private->entries[smp_processor_id()]; 274 table_base = private->entries[smp_processor_id()];
273 275
@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
338 /* Verdict */ 340 /* Verdict */
339 break; 341 break;
340 } while (!acpar.hotdrop); 342 } while (!acpar.hotdrop);
341 xt_info_rdunlock_bh(); 343 xt_write_recseq_end(addend);
344 local_bh_enable();
342 345
343 if (acpar.hotdrop) 346 if (acpar.hotdrop)
344 return NF_DROP; 347 return NF_DROP;
@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
712 unsigned int i; 715 unsigned int i;
713 716
714 for_each_possible_cpu(cpu) { 717 for_each_possible_cpu(cpu) {
715 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 718 seqcount_t *s = &per_cpu(xt_recseq, cpu);
716 719
717 i = 0; 720 i = 0;
718 xt_entry_foreach(iter, t->entries[cpu], t->size) { 721 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
720 unsigned int start; 723 unsigned int start;
721 724
722 do { 725 do {
723 start = read_seqbegin(lock); 726 start = read_seqcount_begin(s);
724 bcnt = iter->counters.bcnt; 727 bcnt = iter->counters.bcnt;
725 pcnt = iter->counters.pcnt; 728 pcnt = iter->counters.pcnt;
726 } while (read_seqretry(lock, start)); 729 } while (read_seqcount_retry(s, start));
727 730
728 ADD_COUNTER(counters[i], bcnt, pcnt); 731 ADD_COUNTER(counters[i], bcnt, pcnt);
729 ++i; 732 ++i;
@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
1115 int ret = 0; 1118 int ret = 0;
1116 void *loc_cpu_entry; 1119 void *loc_cpu_entry;
1117 struct arpt_entry *iter; 1120 struct arpt_entry *iter;
1121 unsigned int addend;
1118#ifdef CONFIG_COMPAT 1122#ifdef CONFIG_COMPAT
1119 struct compat_xt_counters_info compat_tmp; 1123 struct compat_xt_counters_info compat_tmp;
1120 1124
@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
1171 /* Choose the copy that is on our node */ 1175 /* Choose the copy that is on our node */
1172 curcpu = smp_processor_id(); 1176 curcpu = smp_processor_id();
1173 loc_cpu_entry = private->entries[curcpu]; 1177 loc_cpu_entry = private->entries[curcpu];
1174 xt_info_wrlock(curcpu); 1178 addend = xt_write_recseq_begin();
1175 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1179 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1176 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1180 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1177 ++i; 1181 ++i;
1178 } 1182 }
1179 xt_info_wrunlock(curcpu); 1183 xt_write_recseq_end(addend);
1180 unlock_up_free: 1184 unlock_up_free:
1181 local_bh_enable(); 1185 local_bh_enable();
1182 xt_table_unlock(t); 1186 xt_table_unlock(t);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 704915028009..764743843503 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
68} 68}
69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); 69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
70 70
71/*
72 We keep a set of rules for each CPU, so we can avoid write-locking
73 them in the softirq when updating the counters and therefore
74 only need to read-lock in the softirq; doing a write_lock_bh() in user
75 context stops packets coming through and allows user context to read
76 the counters or update the rules.
77
78 Hence the start of any table is given by get_table() below. */
79
80/* Returns whether matches rule or not. */ 71/* Returns whether matches rule or not. */
81/* Performance critical - called for every packet */ 72/* Performance critical - called for every packet */
82static inline bool 73static inline bool
@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
311 unsigned int *stackptr, origptr, cpu; 302 unsigned int *stackptr, origptr, cpu;
312 const struct xt_table_info *private; 303 const struct xt_table_info *private;
313 struct xt_action_param acpar; 304 struct xt_action_param acpar;
305 unsigned int addend;
314 306
315 /* Initialization */ 307 /* Initialization */
316 ip = ip_hdr(skb); 308 ip = ip_hdr(skb);
@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
331 acpar.hooknum = hook; 323 acpar.hooknum = hook;
332 324
333 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 325 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
334 xt_info_rdlock_bh(); 326 local_bh_disable();
327 addend = xt_write_recseq_begin();
335 private = table->private; 328 private = table->private;
336 cpu = smp_processor_id(); 329 cpu = smp_processor_id();
337 table_base = private->entries[cpu]; 330 table_base = private->entries[cpu];
@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
430 pr_debug("Exiting %s; resetting sp from %u to %u\n", 423 pr_debug("Exiting %s; resetting sp from %u to %u\n",
431 __func__, *stackptr, origptr); 424 __func__, *stackptr, origptr);
432 *stackptr = origptr; 425 *stackptr = origptr;
433 xt_info_rdunlock_bh(); 426 xt_write_recseq_end(addend);
427 local_bh_enable();
428
434#ifdef DEBUG_ALLOW_ALL 429#ifdef DEBUG_ALLOW_ALL
435 return NF_ACCEPT; 430 return NF_ACCEPT;
436#else 431#else
@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
886 unsigned int i; 881 unsigned int i;
887 882
888 for_each_possible_cpu(cpu) { 883 for_each_possible_cpu(cpu) {
889 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 884 seqcount_t *s = &per_cpu(xt_recseq, cpu);
890 885
891 i = 0; 886 i = 0;
892 xt_entry_foreach(iter, t->entries[cpu], t->size) { 887 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
894 unsigned int start; 889 unsigned int start;
895 890
896 do { 891 do {
897 start = read_seqbegin(lock); 892 start = read_seqcount_begin(s);
898 bcnt = iter->counters.bcnt; 893 bcnt = iter->counters.bcnt;
899 pcnt = iter->counters.pcnt; 894 pcnt = iter->counters.pcnt;
900 } while (read_seqretry(lock, start)); 895 } while (read_seqcount_retry(s, start));
901 896
902 ADD_COUNTER(counters[i], bcnt, pcnt); 897 ADD_COUNTER(counters[i], bcnt, pcnt);
903 ++i; /* macro does multi eval of i */ 898 ++i; /* macro does multi eval of i */
@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
1312 int ret = 0; 1307 int ret = 0;
1313 void *loc_cpu_entry; 1308 void *loc_cpu_entry;
1314 struct ipt_entry *iter; 1309 struct ipt_entry *iter;
1310 unsigned int addend;
1315#ifdef CONFIG_COMPAT 1311#ifdef CONFIG_COMPAT
1316 struct compat_xt_counters_info compat_tmp; 1312 struct compat_xt_counters_info compat_tmp;
1317 1313
@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
1368 /* Choose the copy that is on our node */ 1364 /* Choose the copy that is on our node */
1369 curcpu = smp_processor_id(); 1365 curcpu = smp_processor_id();
1370 loc_cpu_entry = private->entries[curcpu]; 1366 loc_cpu_entry = private->entries[curcpu];
1371 xt_info_wrlock(curcpu); 1367 addend = xt_write_recseq_begin();
1372 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1368 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1373 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1369 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1374 ++i; 1370 ++i;
1375 } 1371 }
1376 xt_info_wrunlock(curcpu); 1372 xt_write_recseq_end(addend);
1377 unlock_up_free: 1373 unlock_up_free:
1378 local_bh_enable(); 1374 local_bh_enable();
1379 xt_table_unlock(t); 1375 xt_table_unlock(t);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 31427fb57aa8..99cfa28b6d38 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,7 +153,7 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
153} 153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); 154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 155
156static void nf_nat_csum(struct sk_buff *skb, struct iphdr *iph, void *data, 156static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 int datalen, __sum16 *check, int oldlen) 157 int datalen, __sum16 *check, int oldlen)
158{ 158{
159 struct rtable *rt = skb_rtable(skb); 159 struct rtable *rt = skb_rtable(skb);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bceaec42c37d..a8659e0c4a6e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -154,7 +154,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
154 * RFC 1122: SHOULD pass TOS value up to the transport layer. 154 * RFC 1122: SHOULD pass TOS value up to the transport layer.
155 * -> It does. And not only TOS, but all IP header. 155 * -> It does. And not only TOS, but all IP header.
156 */ 156 */
157static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash) 157static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
158{ 158{
159 struct sock *sk; 159 struct sock *sk;
160 struct hlist_head *head; 160 struct hlist_head *head;
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
247 } 247 }
248 248
249 if (inet->recverr) { 249 if (inet->recverr) {
250 struct iphdr *iph = (struct iphdr *)skb->data; 250 const struct iphdr *iph = (const struct iphdr *)skb->data;
251 u8 *payload = skb->data + (iph->ihl << 2); 251 u8 *payload = skb->data + (iph->ihl << 2);
252 252
253 if (inet->hdrincl) 253 if (inet->hdrincl)
@@ -265,7 +265,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
265{ 265{
266 int hash; 266 int hash;
267 struct sock *raw_sk; 267 struct sock *raw_sk;
268 struct iphdr *iph; 268 const struct iphdr *iph;
269 struct net *net; 269 struct net *net;
270 270
271 hash = protocol & (RAW_HTABLE_SIZE - 1); 271 hash = protocol & (RAW_HTABLE_SIZE - 1);
@@ -273,7 +273,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
273 read_lock(&raw_v4_hashinfo.lock); 273 read_lock(&raw_v4_hashinfo.lock);
274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
275 if (raw_sk != NULL) { 275 if (raw_sk != NULL) {
276 iph = (struct iphdr *)skb->data; 276 iph = (const struct iphdr *)skb->data;
277 net = dev_net(skb->dev); 277 net = dev_net(skb->dev);
278 278
279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, 279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
@@ -281,7 +281,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
281 skb->dev->ifindex)) != NULL) { 281 skb->dev->ifindex)) != NULL) {
282 raw_err(raw_sk, skb, info); 282 raw_err(raw_sk, skb, info);
283 raw_sk = sk_next(raw_sk); 283 raw_sk = sk_next(raw_sk);
284 iph = (struct iphdr *)skb->data; 284 iph = (const struct iphdr *)skb->data;
285 } 285 }
286 } 286 }
287 read_unlock(&raw_v4_hashinfo.lock); 287 read_unlock(&raw_v4_hashinfo.lock);
@@ -460,6 +460,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
460 __be32 saddr; 460 __be32 saddr;
461 u8 tos; 461 u8 tos;
462 int err; 462 int err;
463 struct ip_options_data opt_copy;
463 464
464 err = -EMSGSIZE; 465 err = -EMSGSIZE;
465 if (len > 0xFFFF) 466 if (len > 0xFFFF)
@@ -520,8 +521,18 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
520 saddr = ipc.addr; 521 saddr = ipc.addr;
521 ipc.addr = daddr; 522 ipc.addr = daddr;
522 523
523 if (!ipc.opt) 524 if (!ipc.opt) {
524 ipc.opt = inet->opt; 525 struct ip_options_rcu *inet_opt;
526
527 rcu_read_lock();
528 inet_opt = rcu_dereference(inet->inet_opt);
529 if (inet_opt) {
530 memcpy(&opt_copy, inet_opt,
531 sizeof(*inet_opt) + inet_opt->opt.optlen);
532 ipc.opt = &opt_copy.opt;
533 }
534 rcu_read_unlock();
535 }
525 536
526 if (ipc.opt) { 537 if (ipc.opt) {
527 err = -EINVAL; 538 err = -EINVAL;
@@ -530,10 +541,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
530 */ 541 */
531 if (inet->hdrincl) 542 if (inet->hdrincl)
532 goto done; 543 goto done;
533 if (ipc.opt->srr) { 544 if (ipc.opt->opt.srr) {
534 if (!daddr) 545 if (!daddr)
535 goto done; 546 goto done;
536 daddr = ipc.opt->faddr; 547 daddr = ipc.opt->opt.faddr;
537 } 548 }
538 } 549 }
539 tos = RT_CONN_FLAGS(sk); 550 tos = RT_CONN_FLAGS(sk);
@@ -548,17 +559,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
548 } 559 }
549 560
550 { 561 {
551 struct flowi4 fl4 = { 562 struct flowi4 fl4;
552 .flowi4_oif = ipc.oif, 563
553 .flowi4_mark = sk->sk_mark, 564 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
554 .daddr = daddr, 565 RT_SCOPE_UNIVERSE,
555 .saddr = saddr, 566 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
556 .flowi4_tos = tos, 567 FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
557 .flowi4_proto = (inet->hdrincl ? 568
558 IPPROTO_RAW :
559 sk->sk_protocol),
560 .flowi4_flags = FLOWI_FLAG_CAN_SLEEP,
561 };
562 if (!inet->hdrincl) { 569 if (!inet->hdrincl) {
563 err = raw_probe_proto_opt(&fl4, msg); 570 err = raw_probe_proto_opt(&fl4, msg);
564 if (err) 571 if (err)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 99e6e4bb1c72..6a83840b16af 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -424,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
424 dst_metric(&r->dst, RTAX_WINDOW), 424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)), 426 dst_metric(&r->dst, RTAX_RTTVAR)),
427 r->rt_tos, 427 r->rt_key_tos,
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, 428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output == 429 r->dst.hh ? (r->dst.hh->hh_output ==
430 dev_queue_xmit) : 0, 430 dev_queue_xmit) : 0,
@@ -724,7 +724,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | 724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | 725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
726 (rt1->rt_mark ^ rt2->rt_mark) | 726 (rt1->rt_mark ^ rt2->rt_mark) |
727 (rt1->rt_tos ^ rt2->rt_tos) | 727 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
728 (rt1->rt_oif ^ rt2->rt_oif) | 728 (rt1->rt_oif ^ rt2->rt_oif) |
729 (rt1->rt_iif ^ rt2->rt_iif)) == 0; 729 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
730} 730}
@@ -1349,7 +1349,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1349 rt_genid(dev_net(dst->dev))); 1349 rt_genid(dev_net(dst->dev)));
1350#if RT_CACHE_DEBUG >= 1 1350#if RT_CACHE_DEBUG >= 1
1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n", 1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1352 &rt->rt_dst, rt->rt_tos); 1352 &rt->rt_dst, rt->rt_key_tos);
1353#endif 1353#endif
1354 rt_del(hash, rt); 1354 rt_del(hash, rt);
1355 ret = NULL; 1355 ret = NULL;
@@ -1507,7 +1507,7 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
1507 return 68; 1507 return 68;
1508} 1508}
1509 1509
1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, 1510unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1511 unsigned short new_mtu, 1511 unsigned short new_mtu,
1512 struct net_device *dev) 1512 struct net_device *dev)
1513{ 1513{
@@ -1710,7 +1710,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
1710 struct flowi4 fl4 = { 1710 struct flowi4 fl4 = {
1711 .daddr = rt->rt_key_dst, 1711 .daddr = rt->rt_key_dst,
1712 .saddr = rt->rt_key_src, 1712 .saddr = rt->rt_key_src,
1713 .flowi4_tos = rt->rt_tos, 1713 .flowi4_tos = rt->rt_key_tos,
1714 .flowi4_oif = rt->rt_oif, 1714 .flowi4_oif = rt->rt_oif,
1715 .flowi4_iif = rt->rt_iif, 1715 .flowi4_iif = rt->rt_iif,
1716 .flowi4_mark = rt->rt_mark, 1716 .flowi4_mark = rt->rt_mark,
@@ -1767,7 +1767,7 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1767 return mtu; 1767 return mtu;
1768} 1768}
1769 1769
1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4, 1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1771 struct fib_info *fi) 1771 struct fib_info *fi)
1772{ 1772{
1773 struct inet_peer *peer; 1773 struct inet_peer *peer;
@@ -1776,7 +1776,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1776 /* If a peer entry exists for this destination, we must hook 1776 /* If a peer entry exists for this destination, we must hook
1777 * it up in order to get at cached metrics. 1777 * it up in order to get at cached metrics.
1778 */ 1778 */
1779 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS)) 1779 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1780 create = 1; 1780 create = 1;
1781 1781
1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create); 1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
@@ -1803,7 +1803,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1803 } 1803 }
1804} 1804}
1805 1805
1806static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4, 1806static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1807 const struct fib_result *res, 1807 const struct fib_result *res,
1808 struct fib_info *fi, u16 type, u32 itag) 1808 struct fib_info *fi, u16 type, u32 itag)
1809{ 1809{
@@ -1813,7 +1813,7 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1813 if (FIB_RES_GW(*res) && 1813 if (FIB_RES_GW(*res) &&
1814 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1814 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1815 rt->rt_gateway = FIB_RES_GW(*res); 1815 rt->rt_gateway = FIB_RES_GW(*res);
1816 rt_init_metrics(rt, oldflp4, fi); 1816 rt_init_metrics(rt, fl4, fi);
1817#ifdef CONFIG_IP_ROUTE_CLASSID 1817#ifdef CONFIG_IP_ROUTE_CLASSID
1818 dst->tclassid = FIB_RES_NH(*res).nh_tclassid; 1818 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1819#endif 1819#endif
@@ -1830,20 +1830,15 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1830#endif 1830#endif
1831 set_class_tag(rt, itag); 1831 set_class_tag(rt, itag);
1832#endif 1832#endif
1833 rt->rt_type = type;
1834} 1833}
1835 1834
1836static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm) 1835static struct rtable *rt_dst_alloc(struct net_device *dev,
1836 bool nopolicy, bool noxfrm)
1837{ 1837{
1838 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1); 1838 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1839 if (rt) { 1839 DST_HOST |
1840 rt->dst.obsolete = -1; 1840 (nopolicy ? DST_NOPOLICY : 0) |
1841 1841 (noxfrm ? DST_NOXFRM : 0));
1842 rt->dst.flags = DST_HOST |
1843 (nopolicy ? DST_NOPOLICY : 0) |
1844 (noxfrm ? DST_NOXFRM : 0);
1845 }
1846 return rt;
1847} 1842}
1848 1843
1849/* called in rcu_read_lock() section */ 1844/* called in rcu_read_lock() section */
@@ -1871,36 +1866,38 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1871 goto e_inval; 1866 goto e_inval;
1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 1867 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1873 } else { 1868 } else {
1874 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 1869 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1875 &itag, 0); 1870 &itag);
1876 if (err < 0) 1871 if (err < 0)
1877 goto e_err; 1872 goto e_err;
1878 } 1873 }
1879 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 1874 rth = rt_dst_alloc(init_net.loopback_dev,
1875 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1880 if (!rth) 1876 if (!rth)
1881 goto e_nobufs; 1877 goto e_nobufs;
1882 1878
1879#ifdef CONFIG_IP_ROUTE_CLASSID
1880 rth->dst.tclassid = itag;
1881#endif
1883 rth->dst.output = ip_rt_bug; 1882 rth->dst.output = ip_rt_bug;
1884 1883
1885 rth->rt_key_dst = daddr; 1884 rth->rt_key_dst = daddr;
1886 rth->rt_dst = daddr;
1887 rth->rt_tos = tos;
1888 rth->rt_mark = skb->mark;
1889 rth->rt_key_src = saddr; 1885 rth->rt_key_src = saddr;
1886 rth->rt_genid = rt_genid(dev_net(dev));
1887 rth->rt_flags = RTCF_MULTICAST;
1888 rth->rt_type = RTN_MULTICAST;
1889 rth->rt_key_tos = tos;
1890 rth->rt_dst = daddr;
1890 rth->rt_src = saddr; 1891 rth->rt_src = saddr;
1891#ifdef CONFIG_IP_ROUTE_CLASSID
1892 rth->dst.tclassid = itag;
1893#endif
1894 rth->rt_route_iif = dev->ifindex; 1892 rth->rt_route_iif = dev->ifindex;
1895 rth->rt_iif = dev->ifindex; 1893 rth->rt_iif = dev->ifindex;
1896 rth->dst.dev = init_net.loopback_dev;
1897 dev_hold(rth->dst.dev);
1898 rth->rt_oif = 0; 1894 rth->rt_oif = 0;
1895 rth->rt_mark = skb->mark;
1899 rth->rt_gateway = daddr; 1896 rth->rt_gateway = daddr;
1900 rth->rt_spec_dst= spec_dst; 1897 rth->rt_spec_dst= spec_dst;
1901 rth->rt_genid = rt_genid(dev_net(dev)); 1898 rth->rt_peer_genid = 0;
1902 rth->rt_flags = RTCF_MULTICAST; 1899 rth->peer = NULL;
1903 rth->rt_type = RTN_MULTICAST; 1900 rth->fi = NULL;
1904 if (our) { 1901 if (our) {
1905 rth->dst.input= ip_local_deliver; 1902 rth->dst.input= ip_local_deliver;
1906 rth->rt_flags |= RTCF_LOCAL; 1903 rth->rt_flags |= RTCF_LOCAL;
@@ -1981,8 +1978,8 @@ static int __mkroute_input(struct sk_buff *skb,
1981 } 1978 }
1982 1979
1983 1980
1984 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1981 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1985 in_dev->dev, &spec_dst, &itag, skb->mark); 1982 in_dev->dev, &spec_dst, &itag);
1986 if (err < 0) { 1983 if (err < 0) {
1987 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1984 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1988 saddr); 1985 saddr);
@@ -2013,7 +2010,8 @@ static int __mkroute_input(struct sk_buff *skb,
2013 } 2010 }
2014 } 2011 }
2015 2012
2016 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 2013 rth = rt_dst_alloc(out_dev->dev,
2014 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2017 IN_DEV_CONF_GET(out_dev, NOXFRM)); 2015 IN_DEV_CONF_GET(out_dev, NOXFRM));
2018 if (!rth) { 2016 if (!rth) {
2019 err = -ENOBUFS; 2017 err = -ENOBUFS;
@@ -2021,27 +2019,28 @@ static int __mkroute_input(struct sk_buff *skb,
2021 } 2019 }
2022 2020
2023 rth->rt_key_dst = daddr; 2021 rth->rt_key_dst = daddr;
2024 rth->rt_dst = daddr;
2025 rth->rt_tos = tos;
2026 rth->rt_mark = skb->mark;
2027 rth->rt_key_src = saddr; 2022 rth->rt_key_src = saddr;
2023 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2024 rth->rt_flags = flags;
2025 rth->rt_type = res->type;
2026 rth->rt_key_tos = tos;
2027 rth->rt_dst = daddr;
2028 rth->rt_src = saddr; 2028 rth->rt_src = saddr;
2029 rth->rt_gateway = daddr;
2030 rth->rt_route_iif = in_dev->dev->ifindex; 2029 rth->rt_route_iif = in_dev->dev->ifindex;
2031 rth->rt_iif = in_dev->dev->ifindex; 2030 rth->rt_iif = in_dev->dev->ifindex;
2032 rth->dst.dev = (out_dev)->dev;
2033 dev_hold(rth->dst.dev);
2034 rth->rt_oif = 0; 2031 rth->rt_oif = 0;
2032 rth->rt_mark = skb->mark;
2033 rth->rt_gateway = daddr;
2035 rth->rt_spec_dst= spec_dst; 2034 rth->rt_spec_dst= spec_dst;
2035 rth->rt_peer_genid = 0;
2036 rth->peer = NULL;
2037 rth->fi = NULL;
2036 2038
2037 rth->dst.input = ip_forward; 2039 rth->dst.input = ip_forward;
2038 rth->dst.output = ip_output; 2040 rth->dst.output = ip_output;
2039 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2040 2041
2041 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag); 2042 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2042 2043
2043 rth->rt_flags = flags;
2044
2045 *result = rth; 2044 *result = rth;
2046 err = 0; 2045 err = 0;
2047 cleanup: 2046 cleanup:
@@ -2150,9 +2149,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2150 goto brd_input; 2149 goto brd_input;
2151 2150
2152 if (res.type == RTN_LOCAL) { 2151 if (res.type == RTN_LOCAL) {
2153 err = fib_validate_source(saddr, daddr, tos, 2152 err = fib_validate_source(skb, saddr, daddr, tos,
2154 net->loopback_dev->ifindex, 2153 net->loopback_dev->ifindex,
2155 dev, &spec_dst, &itag, skb->mark); 2154 dev, &spec_dst, &itag);
2156 if (err < 0) 2155 if (err < 0)
2157 goto martian_source_keep_err; 2156 goto martian_source_keep_err;
2158 if (err) 2157 if (err)
@@ -2176,8 +2175,8 @@ brd_input:
2176 if (ipv4_is_zeronet(saddr)) 2175 if (ipv4_is_zeronet(saddr))
2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2176 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2178 else { 2177 else {
2179 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2178 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2180 &itag, skb->mark); 2179 &itag);
2181 if (err < 0) 2180 if (err < 0)
2182 goto martian_source_keep_err; 2181 goto martian_source_keep_err;
2183 if (err) 2182 if (err)
@@ -2188,36 +2187,42 @@ brd_input:
2188 RT_CACHE_STAT_INC(in_brd); 2187 RT_CACHE_STAT_INC(in_brd);
2189 2188
2190local_input: 2189local_input:
2191 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 2190 rth = rt_dst_alloc(net->loopback_dev,
2191 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2192 if (!rth) 2192 if (!rth)
2193 goto e_nobufs; 2193 goto e_nobufs;
2194 2194
2195 rth->dst.input= ip_local_deliver;
2195 rth->dst.output= ip_rt_bug; 2196 rth->dst.output= ip_rt_bug;
2196 rth->rt_genid = rt_genid(net); 2197#ifdef CONFIG_IP_ROUTE_CLASSID
2198 rth->dst.tclassid = itag;
2199#endif
2197 2200
2198 rth->rt_key_dst = daddr; 2201 rth->rt_key_dst = daddr;
2199 rth->rt_dst = daddr;
2200 rth->rt_tos = tos;
2201 rth->rt_mark = skb->mark;
2202 rth->rt_key_src = saddr; 2202 rth->rt_key_src = saddr;
2203 rth->rt_genid = rt_genid(net);
2204 rth->rt_flags = flags|RTCF_LOCAL;
2205 rth->rt_type = res.type;
2206 rth->rt_key_tos = tos;
2207 rth->rt_dst = daddr;
2203 rth->rt_src = saddr; 2208 rth->rt_src = saddr;
2204#ifdef CONFIG_IP_ROUTE_CLASSID 2209#ifdef CONFIG_IP_ROUTE_CLASSID
2205 rth->dst.tclassid = itag; 2210 rth->dst.tclassid = itag;
2206#endif 2211#endif
2207 rth->rt_route_iif = dev->ifindex; 2212 rth->rt_route_iif = dev->ifindex;
2208 rth->rt_iif = dev->ifindex; 2213 rth->rt_iif = dev->ifindex;
2209 rth->dst.dev = net->loopback_dev; 2214 rth->rt_oif = 0;
2210 dev_hold(rth->dst.dev); 2215 rth->rt_mark = skb->mark;
2211 rth->rt_gateway = daddr; 2216 rth->rt_gateway = daddr;
2212 rth->rt_spec_dst= spec_dst; 2217 rth->rt_spec_dst= spec_dst;
2213 rth->dst.input= ip_local_deliver; 2218 rth->rt_peer_genid = 0;
2214 rth->rt_flags = flags|RTCF_LOCAL; 2219 rth->peer = NULL;
2220 rth->fi = NULL;
2215 if (res.type == RTN_UNREACHABLE) { 2221 if (res.type == RTN_UNREACHABLE) {
2216 rth->dst.input= ip_error; 2222 rth->dst.input= ip_error;
2217 rth->dst.error= -err; 2223 rth->dst.error= -err;
2218 rth->rt_flags &= ~RTCF_LOCAL; 2224 rth->rt_flags &= ~RTCF_LOCAL;
2219 } 2225 }
2220 rth->rt_type = res.type;
2221 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net)); 2226 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2222 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif); 2227 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2223 err = 0; 2228 err = 0;
@@ -2288,7 +2293,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2288 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | 2293 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2289 (rth->rt_iif ^ iif) | 2294 (rth->rt_iif ^ iif) |
2290 rth->rt_oif | 2295 rth->rt_oif |
2291 (rth->rt_tos ^ tos)) == 0 && 2296 (rth->rt_key_tos ^ tos)) == 0 &&
2292 rth->rt_mark == skb->mark && 2297 rth->rt_mark == skb->mark &&
2293 net_eq(dev_net(rth->dst.dev), net) && 2298 net_eq(dev_net(rth->dst.dev), net) &&
2294 !rt_is_expired(rth)) { 2299 !rt_is_expired(rth)) {
@@ -2349,12 +2354,12 @@ EXPORT_SYMBOL(ip_route_input_common);
2349/* called with rcu_read_lock() */ 2354/* called with rcu_read_lock() */
2350static struct rtable *__mkroute_output(const struct fib_result *res, 2355static struct rtable *__mkroute_output(const struct fib_result *res,
2351 const struct flowi4 *fl4, 2356 const struct flowi4 *fl4,
2352 const struct flowi4 *oldflp4, 2357 __be32 orig_daddr, __be32 orig_saddr,
2353 struct net_device *dev_out, 2358 int orig_oif, struct net_device *dev_out,
2354 unsigned int flags) 2359 unsigned int flags)
2355{ 2360{
2356 struct fib_info *fi = res->fi; 2361 struct fib_info *fi = res->fi;
2357 u32 tos = RT_FL_TOS(oldflp4); 2362 u32 tos = RT_FL_TOS(fl4);
2358 struct in_device *in_dev; 2363 struct in_device *in_dev;
2359 u16 type = res->type; 2364 u16 type = res->type;
2360 struct rtable *rth; 2365 struct rtable *rth;
@@ -2381,8 +2386,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2381 fi = NULL; 2386 fi = NULL;
2382 } else if (type == RTN_MULTICAST) { 2387 } else if (type == RTN_MULTICAST) {
2383 flags |= RTCF_MULTICAST | RTCF_LOCAL; 2388 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2384 if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr, 2389 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2385 oldflp4->flowi4_proto)) 2390 fl4->flowi4_proto))
2386 flags &= ~RTCF_LOCAL; 2391 flags &= ~RTCF_LOCAL;
2387 /* If multicast route do not exist use 2392 /* If multicast route do not exist use
2388 * default one, but do not gateway in this case. 2393 * default one, but do not gateway in this case.
@@ -2392,29 +2397,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2392 fi = NULL; 2397 fi = NULL;
2393 } 2398 }
2394 2399
2395 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 2400 rth = rt_dst_alloc(dev_out,
2401 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2396 IN_DEV_CONF_GET(in_dev, NOXFRM)); 2402 IN_DEV_CONF_GET(in_dev, NOXFRM));
2397 if (!rth) 2403 if (!rth)
2398 return ERR_PTR(-ENOBUFS); 2404 return ERR_PTR(-ENOBUFS);
2399 2405
2400 rth->rt_key_dst = oldflp4->daddr; 2406 rth->dst.output = ip_output;
2401 rth->rt_tos = tos; 2407
2402 rth->rt_key_src = oldflp4->saddr; 2408 rth->rt_key_dst = orig_daddr;
2403 rth->rt_oif = oldflp4->flowi4_oif; 2409 rth->rt_key_src = orig_saddr;
2404 rth->rt_mark = oldflp4->flowi4_mark; 2410 rth->rt_genid = rt_genid(dev_net(dev_out));
2411 rth->rt_flags = flags;
2412 rth->rt_type = type;
2413 rth->rt_key_tos = tos;
2405 rth->rt_dst = fl4->daddr; 2414 rth->rt_dst = fl4->daddr;
2406 rth->rt_src = fl4->saddr; 2415 rth->rt_src = fl4->saddr;
2407 rth->rt_route_iif = 0; 2416 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex; 2417 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2409 /* get references to the devices that are to be hold by the routing 2418 rth->rt_oif = orig_oif;
2410 cache entry */ 2419 rth->rt_mark = fl4->flowi4_mark;
2411 rth->dst.dev = dev_out;
2412 dev_hold(dev_out);
2413 rth->rt_gateway = fl4->daddr; 2420 rth->rt_gateway = fl4->daddr;
2414 rth->rt_spec_dst= fl4->saddr; 2421 rth->rt_spec_dst= fl4->saddr;
2415 2422 rth->rt_peer_genid = 0;
2416 rth->dst.output=ip_output; 2423 rth->peer = NULL;
2417 rth->rt_genid = rt_genid(dev_net(dev_out)); 2424 rth->fi = NULL;
2418 2425
2419 RT_CACHE_STAT_INC(out_slow_tot); 2426 RT_CACHE_STAT_INC(out_slow_tot);
2420 2427
@@ -2432,7 +2439,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2432#ifdef CONFIG_IP_MROUTE 2439#ifdef CONFIG_IP_MROUTE
2433 if (type == RTN_MULTICAST) { 2440 if (type == RTN_MULTICAST) {
2434 if (IN_DEV_MFORWARD(in_dev) && 2441 if (IN_DEV_MFORWARD(in_dev) &&
2435 !ipv4_is_local_multicast(oldflp4->daddr)) { 2442 !ipv4_is_local_multicast(fl4->daddr)) {
2436 rth->dst.input = ip_mr_input; 2443 rth->dst.input = ip_mr_input;
2437 rth->dst.output = ip_mc_output; 2444 rth->dst.output = ip_mc_output;
2438 } 2445 }
@@ -2440,9 +2447,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2440#endif 2447#endif
2441 } 2448 }
2442 2449
2443 rt_set_nexthop(rth, oldflp4, res, fi, type, 0); 2450 rt_set_nexthop(rth, fl4, res, fi, type, 0);
2444 2451
2445 rth->rt_flags = flags;
2446 return rth; 2452 return rth;
2447} 2453}
2448 2454
@@ -2451,36 +2457,37 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2451 * called with rcu_read_lock(); 2457 * called with rcu_read_lock();
2452 */ 2458 */
2453 2459
2454static struct rtable *ip_route_output_slow(struct net *net, 2460static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2455 const struct flowi4 *oldflp4)
2456{ 2461{
2457 u32 tos = RT_FL_TOS(oldflp4);
2458 struct flowi4 fl4;
2459 struct fib_result res;
2460 unsigned int flags = 0;
2461 struct net_device *dev_out = NULL; 2462 struct net_device *dev_out = NULL;
2463 u32 tos = RT_FL_TOS(fl4);
2464 unsigned int flags = 0;
2465 struct fib_result res;
2462 struct rtable *rth; 2466 struct rtable *rth;
2467 __be32 orig_daddr;
2468 __be32 orig_saddr;
2469 int orig_oif;
2463 2470
2464 res.fi = NULL; 2471 res.fi = NULL;
2465#ifdef CONFIG_IP_MULTIPLE_TABLES 2472#ifdef CONFIG_IP_MULTIPLE_TABLES
2466 res.r = NULL; 2473 res.r = NULL;
2467#endif 2474#endif
2468 2475
2469 fl4.flowi4_oif = oldflp4->flowi4_oif; 2476 orig_daddr = fl4->daddr;
2470 fl4.flowi4_iif = net->loopback_dev->ifindex; 2477 orig_saddr = fl4->saddr;
2471 fl4.flowi4_mark = oldflp4->flowi4_mark; 2478 orig_oif = fl4->flowi4_oif;
2472 fl4.daddr = oldflp4->daddr; 2479
2473 fl4.saddr = oldflp4->saddr; 2480 fl4->flowi4_iif = net->loopback_dev->ifindex;
2474 fl4.flowi4_tos = tos & IPTOS_RT_MASK; 2481 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2475 fl4.flowi4_scope = ((tos & RTO_ONLINK) ? 2482 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2476 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 2483 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2477 2484
2478 rcu_read_lock(); 2485 rcu_read_lock();
2479 if (oldflp4->saddr) { 2486 if (fl4->saddr) {
2480 rth = ERR_PTR(-EINVAL); 2487 rth = ERR_PTR(-EINVAL);
2481 if (ipv4_is_multicast(oldflp4->saddr) || 2488 if (ipv4_is_multicast(fl4->saddr) ||
2482 ipv4_is_lbcast(oldflp4->saddr) || 2489 ipv4_is_lbcast(fl4->saddr) ||
2483 ipv4_is_zeronet(oldflp4->saddr)) 2490 ipv4_is_zeronet(fl4->saddr))
2484 goto out; 2491 goto out;
2485 2492
2486 /* I removed check for oif == dev_out->oif here. 2493 /* I removed check for oif == dev_out->oif here.
@@ -2491,11 +2498,11 @@ static struct rtable *ip_route_output_slow(struct net *net,
2491 of another iface. --ANK 2498 of another iface. --ANK
2492 */ 2499 */
2493 2500
2494 if (oldflp4->flowi4_oif == 0 && 2501 if (fl4->flowi4_oif == 0 &&
2495 (ipv4_is_multicast(oldflp4->daddr) || 2502 (ipv4_is_multicast(fl4->daddr) ||
2496 ipv4_is_lbcast(oldflp4->daddr))) { 2503 ipv4_is_lbcast(fl4->daddr))) {
2497 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2504 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2498 dev_out = __ip_dev_find(net, oldflp4->saddr, false); 2505 dev_out = __ip_dev_find(net, fl4->saddr, false);
2499 if (dev_out == NULL) 2506 if (dev_out == NULL)
2500 goto out; 2507 goto out;
2501 2508
@@ -2514,20 +2521,20 @@ static struct rtable *ip_route_output_slow(struct net *net,
2514 Luckily, this hack is good workaround. 2521 Luckily, this hack is good workaround.
2515 */ 2522 */
2516 2523
2517 fl4.flowi4_oif = dev_out->ifindex; 2524 fl4->flowi4_oif = dev_out->ifindex;
2518 goto make_route; 2525 goto make_route;
2519 } 2526 }
2520 2527
2521 if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) { 2528 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2522 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2529 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2523 if (!__ip_dev_find(net, oldflp4->saddr, false)) 2530 if (!__ip_dev_find(net, fl4->saddr, false))
2524 goto out; 2531 goto out;
2525 } 2532 }
2526 } 2533 }
2527 2534
2528 2535
2529 if (oldflp4->flowi4_oif) { 2536 if (fl4->flowi4_oif) {
2530 dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif); 2537 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2531 rth = ERR_PTR(-ENODEV); 2538 rth = ERR_PTR(-ENODEV);
2532 if (dev_out == NULL) 2539 if (dev_out == NULL)
2533 goto out; 2540 goto out;
@@ -2537,37 +2544,37 @@ static struct rtable *ip_route_output_slow(struct net *net,
2537 rth = ERR_PTR(-ENETUNREACH); 2544 rth = ERR_PTR(-ENETUNREACH);
2538 goto out; 2545 goto out;
2539 } 2546 }
2540 if (ipv4_is_local_multicast(oldflp4->daddr) || 2547 if (ipv4_is_local_multicast(fl4->daddr) ||
2541 ipv4_is_lbcast(oldflp4->daddr)) { 2548 ipv4_is_lbcast(fl4->daddr)) {
2542 if (!fl4.saddr) 2549 if (!fl4->saddr)
2543 fl4.saddr = inet_select_addr(dev_out, 0, 2550 fl4->saddr = inet_select_addr(dev_out, 0,
2544 RT_SCOPE_LINK); 2551 RT_SCOPE_LINK);
2545 goto make_route; 2552 goto make_route;
2546 } 2553 }
2547 if (!fl4.saddr) { 2554 if (fl4->saddr) {
2548 if (ipv4_is_multicast(oldflp4->daddr)) 2555 if (ipv4_is_multicast(fl4->daddr))
2549 fl4.saddr = inet_select_addr(dev_out, 0, 2556 fl4->saddr = inet_select_addr(dev_out, 0,
2550 fl4.flowi4_scope); 2557 fl4->flowi4_scope);
2551 else if (!oldflp4->daddr) 2558 else if (!fl4->daddr)
2552 fl4.saddr = inet_select_addr(dev_out, 0, 2559 fl4->saddr = inet_select_addr(dev_out, 0,
2553 RT_SCOPE_HOST); 2560 RT_SCOPE_HOST);
2554 } 2561 }
2555 } 2562 }
2556 2563
2557 if (!fl4.daddr) { 2564 if (!fl4->daddr) {
2558 fl4.daddr = fl4.saddr; 2565 fl4->daddr = fl4->saddr;
2559 if (!fl4.daddr) 2566 if (!fl4->daddr)
2560 fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK); 2567 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2561 dev_out = net->loopback_dev; 2568 dev_out = net->loopback_dev;
2562 fl4.flowi4_oif = net->loopback_dev->ifindex; 2569 fl4->flowi4_oif = net->loopback_dev->ifindex;
2563 res.type = RTN_LOCAL; 2570 res.type = RTN_LOCAL;
2564 flags |= RTCF_LOCAL; 2571 flags |= RTCF_LOCAL;
2565 goto make_route; 2572 goto make_route;
2566 } 2573 }
2567 2574
2568 if (fib_lookup(net, &fl4, &res)) { 2575 if (fib_lookup(net, fl4, &res)) {
2569 res.fi = NULL; 2576 res.fi = NULL;
2570 if (oldflp4->flowi4_oif) { 2577 if (fl4->flowi4_oif) {
2571 /* Apparently, routing tables are wrong. Assume, 2578 /* Apparently, routing tables are wrong. Assume,
2572 that the destination is on link. 2579 that the destination is on link.
2573 2580
@@ -2586,9 +2593,9 @@ static struct rtable *ip_route_output_slow(struct net *net,
2586 likely IPv6, but we do not. 2593 likely IPv6, but we do not.
2587 */ 2594 */
2588 2595
2589 if (fl4.saddr == 0) 2596 if (fl4->saddr == 0)
2590 fl4.saddr = inet_select_addr(dev_out, 0, 2597 fl4->saddr = inet_select_addr(dev_out, 0,
2591 RT_SCOPE_LINK); 2598 RT_SCOPE_LINK);
2592 res.type = RTN_UNICAST; 2599 res.type = RTN_UNICAST;
2593 goto make_route; 2600 goto make_route;
2594 } 2601 }
@@ -2597,42 +2604,45 @@ static struct rtable *ip_route_output_slow(struct net *net,
2597 } 2604 }
2598 2605
2599 if (res.type == RTN_LOCAL) { 2606 if (res.type == RTN_LOCAL) {
2600 if (!fl4.saddr) { 2607 if (!fl4->saddr) {
2601 if (res.fi->fib_prefsrc) 2608 if (res.fi->fib_prefsrc)
2602 fl4.saddr = res.fi->fib_prefsrc; 2609 fl4->saddr = res.fi->fib_prefsrc;
2603 else 2610 else
2604 fl4.saddr = fl4.daddr; 2611 fl4->saddr = fl4->daddr;
2605 } 2612 }
2606 dev_out = net->loopback_dev; 2613 dev_out = net->loopback_dev;
2607 fl4.flowi4_oif = dev_out->ifindex; 2614 fl4->flowi4_oif = dev_out->ifindex;
2608 res.fi = NULL; 2615 res.fi = NULL;
2609 flags |= RTCF_LOCAL; 2616 flags |= RTCF_LOCAL;
2610 goto make_route; 2617 goto make_route;
2611 } 2618 }
2612 2619
2613#ifdef CONFIG_IP_ROUTE_MULTIPATH 2620#ifdef CONFIG_IP_ROUTE_MULTIPATH
2614 if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0) 2621 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2615 fib_select_multipath(&res); 2622 fib_select_multipath(&res);
2616 else 2623 else
2617#endif 2624#endif
2618 if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif) 2625 if (!res.prefixlen &&
2626 res.table->tb_num_default > 1 &&
2627 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2619 fib_select_default(&res); 2628 fib_select_default(&res);
2620 2629
2621 if (!fl4.saddr) 2630 if (!fl4->saddr)
2622 fl4.saddr = FIB_RES_PREFSRC(net, res); 2631 fl4->saddr = FIB_RES_PREFSRC(net, res);
2623 2632
2624 dev_out = FIB_RES_DEV(res); 2633 dev_out = FIB_RES_DEV(res);
2625 fl4.flowi4_oif = dev_out->ifindex; 2634 fl4->flowi4_oif = dev_out->ifindex;
2626 2635
2627 2636
2628make_route: 2637make_route:
2629 rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags); 2638 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2639 dev_out, flags);
2630 if (!IS_ERR(rth)) { 2640 if (!IS_ERR(rth)) {
2631 unsigned int hash; 2641 unsigned int hash;
2632 2642
2633 hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif, 2643 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2634 rt_genid(dev_net(dev_out))); 2644 rt_genid(dev_net(dev_out)));
2635 rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif); 2645 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2636 } 2646 }
2637 2647
2638out: 2648out:
@@ -2640,7 +2650,7 @@ out:
2640 return rth; 2650 return rth;
2641} 2651}
2642 2652
2643struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4) 2653struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2644{ 2654{
2645 struct rtable *rth; 2655 struct rtable *rth;
2646 unsigned int hash; 2656 unsigned int hash;
@@ -2658,13 +2668,17 @@ struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
2658 rt_is_output_route(rth) && 2668 rt_is_output_route(rth) &&
2659 rth->rt_oif == flp4->flowi4_oif && 2669 rth->rt_oif == flp4->flowi4_oif &&
2660 rth->rt_mark == flp4->flowi4_mark && 2670 rth->rt_mark == flp4->flowi4_mark &&
2661 !((rth->rt_tos ^ flp4->flowi4_tos) & 2671 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2662 (IPTOS_RT_MASK | RTO_ONLINK)) && 2672 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2663 net_eq(dev_net(rth->dst.dev), net) && 2673 net_eq(dev_net(rth->dst.dev), net) &&
2664 !rt_is_expired(rth)) { 2674 !rt_is_expired(rth)) {
2665 dst_use(&rth->dst, jiffies); 2675 dst_use(&rth->dst, jiffies);
2666 RT_CACHE_STAT_INC(out_hit); 2676 RT_CACHE_STAT_INC(out_hit);
2667 rcu_read_unlock_bh(); 2677 rcu_read_unlock_bh();
2678 if (!flp4->saddr)
2679 flp4->saddr = rth->rt_src;
2680 if (!flp4->daddr)
2681 flp4->daddr = rth->rt_dst;
2668 return rth; 2682 return rth;
2669 } 2683 }
2670 RT_CACHE_STAT_INC(out_hlist_search); 2684 RT_CACHE_STAT_INC(out_hlist_search);
@@ -2709,7 +2723,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2709 2723
2710struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2724struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2711{ 2725{
2712 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1); 2726 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2713 struct rtable *ort = (struct rtable *) dst_orig; 2727 struct rtable *ort = (struct rtable *) dst_orig;
2714 2728
2715 if (rt) { 2729 if (rt) {
@@ -2726,7 +2740,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2726 2740
2727 rt->rt_key_dst = ort->rt_key_dst; 2741 rt->rt_key_dst = ort->rt_key_dst;
2728 rt->rt_key_src = ort->rt_key_src; 2742 rt->rt_key_src = ort->rt_key_src;
2729 rt->rt_tos = ort->rt_tos; 2743 rt->rt_key_tos = ort->rt_key_tos;
2730 rt->rt_route_iif = ort->rt_route_iif; 2744 rt->rt_route_iif = ort->rt_route_iif;
2731 rt->rt_iif = ort->rt_iif; 2745 rt->rt_iif = ort->rt_iif;
2732 rt->rt_oif = ort->rt_oif; 2746 rt->rt_oif = ort->rt_oif;
@@ -2762,15 +2776,10 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2762 if (IS_ERR(rt)) 2776 if (IS_ERR(rt))
2763 return rt; 2777 return rt;
2764 2778
2765 if (flp4->flowi4_proto) { 2779 if (flp4->flowi4_proto)
2766 if (!flp4->saddr)
2767 flp4->saddr = rt->rt_src;
2768 if (!flp4->daddr)
2769 flp4->daddr = rt->rt_dst;
2770 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, 2780 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2771 flowi4_to_flowi(flp4), 2781 flowi4_to_flowi(flp4),
2772 sk, 0); 2782 sk, 0);
2773 }
2774 2783
2775 return rt; 2784 return rt;
2776} 2785}
@@ -2794,7 +2803,7 @@ static int rt_fill_info(struct net *net,
2794 r->rtm_family = AF_INET; 2803 r->rtm_family = AF_INET;
2795 r->rtm_dst_len = 32; 2804 r->rtm_dst_len = 32;
2796 r->rtm_src_len = 0; 2805 r->rtm_src_len = 0;
2797 r->rtm_tos = rt->rt_tos; 2806 r->rtm_tos = rt->rt_key_tos;
2798 r->rtm_table = RT_TABLE_MAIN; 2807 r->rtm_table = RT_TABLE_MAIN;
2799 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2808 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2800 r->rtm_type = rt->rt_type; 2809 r->rtm_type = rt->rt_type;
@@ -2848,7 +2857,9 @@ static int rt_fill_info(struct net *net,
2848 2857
2849 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && 2858 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2850 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2859 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2851 int err = ipmr_get_route(net, skb, r, nowait); 2860 int err = ipmr_get_route(net, skb,
2861 rt->rt_src, rt->rt_dst,
2862 r, nowait);
2852 if (err <= 0) { 2863 if (err <= 0) {
2853 if (!nowait) { 2864 if (!nowait) {
2854 if (err == 0) 2865 if (err == 0)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8b44c6d2a79b..26461492a847 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -321,10 +321,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
321 * the ACK carries the same options again (see RFC1122 4.2.3.8) 321 * the ACK carries the same options again (see RFC1122 4.2.3.8)
322 */ 322 */
323 if (opt && opt->optlen) { 323 if (opt && opt->optlen) {
324 int opt_size = sizeof(struct ip_options) + opt->optlen; 324 int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
325 325
326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC); 326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
327 if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) { 327 if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
328 kfree(ireq->opt); 328 kfree(ireq->opt);
329 ireq->opt = NULL; 329 ireq->opt = NULL;
330 } 330 }
@@ -345,17 +345,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
345 * no easy way to do this. 345 * no easy way to do this.
346 */ 346 */
347 { 347 {
348 struct flowi4 fl4 = { 348 struct flowi4 fl4;
349 .flowi4_mark = sk->sk_mark, 349
350 .daddr = ((opt && opt->srr) ? 350 flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
351 opt->faddr : ireq->rmt_addr), 351 RT_SCOPE_UNIVERSE, IPPROTO_TCP,
352 .saddr = ireq->loc_addr, 352 inet_sk_flowi_flags(sk),
353 .flowi4_tos = RT_CONN_FLAGS(sk), 353 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
354 .flowi4_proto = IPPROTO_TCP, 354 ireq->loc_addr, th->source, th->dest);
355 .flowi4_flags = inet_sk_flowi_flags(sk),
356 .fl4_sport = th->dest,
357 .fl4_dport = th->source,
358 };
359 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 355 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
360 rt = ip_route_output_key(sock_net(sk), &fl4); 356 rt = ip_route_output_key(sock_net(sk), &fl4);
361 if (IS_ERR(rt)) { 357 if (IS_ERR(rt)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b22d45010545..054a59d21eb0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -999,7 +999,8 @@ new_segment:
999 /* We have some space in skb head. Superb! */ 999 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1000 if (copy > skb_tailroom(skb))
1001 copy = skb_tailroom(skb); 1001 copy = skb_tailroom(skb);
1002 if ((err = skb_add_data(skb, from, copy)) != 0) 1002 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err)
1003 goto do_fault; 1004 goto do_fault;
1004 } else { 1005 } else {
1005 int merge = 0; 1006 int merge = 0;
@@ -1042,8 +1043,8 @@ new_segment:
1042 1043
1043 /* Time to copy data. We are close to 1044 /* Time to copy data. We are close to
1044 * the end! */ 1045 * the end! */
1045 err = skb_copy_to_page(sk, from, skb, page, 1046 err = skb_copy_to_page_nocache(sk, from, skb,
1046 off, copy); 1047 page, off, copy);
1047 if (err) { 1048 if (err) {
1048 /* If this page was new, give it to the 1049 /* If this page was new, give it to the
1049 * socket so it does not get leaked. 1050 * socket so it does not get leaked.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f7e6c2c2d2bb..f3d16d8918c7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -146,13 +146,15 @@ EXPORT_SYMBOL_GPL(tcp_twsk_unique);
146/* This will initiate an outgoing connection. */ 146/* This will initiate an outgoing connection. */
147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148{ 148{
149 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
149 struct inet_sock *inet = inet_sk(sk); 150 struct inet_sock *inet = inet_sk(sk);
150 struct tcp_sock *tp = tcp_sk(sk); 151 struct tcp_sock *tp = tcp_sk(sk);
151 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
152 __be16 orig_sport, orig_dport; 152 __be16 orig_sport, orig_dport;
153 struct rtable *rt;
154 __be32 daddr, nexthop; 153 __be32 daddr, nexthop;
154 struct flowi4 fl4;
155 struct rtable *rt;
155 int err; 156 int err;
157 struct ip_options_rcu *inet_opt;
156 158
157 if (addr_len < sizeof(struct sockaddr_in)) 159 if (addr_len < sizeof(struct sockaddr_in))
158 return -EINVAL; 160 return -EINVAL;
@@ -161,15 +163,17 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
161 return -EAFNOSUPPORT; 163 return -EAFNOSUPPORT;
162 164
163 nexthop = daddr = usin->sin_addr.s_addr; 165 nexthop = daddr = usin->sin_addr.s_addr;
164 if (inet->opt && inet->opt->srr) { 166 inet_opt = rcu_dereference_protected(inet->inet_opt,
167 sock_owned_by_user(sk));
168 if (inet_opt && inet_opt->opt.srr) {
165 if (!daddr) 169 if (!daddr)
166 return -EINVAL; 170 return -EINVAL;
167 nexthop = inet->opt->faddr; 171 nexthop = inet_opt->opt.faddr;
168 } 172 }
169 173
170 orig_sport = inet->inet_sport; 174 orig_sport = inet->inet_sport;
171 orig_dport = usin->sin_port; 175 orig_dport = usin->sin_port;
172 rt = ip_route_connect(nexthop, inet->inet_saddr, 176 rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 177 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP, 178 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true); 179 orig_sport, orig_dport, sk, true);
@@ -185,11 +189,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
185 return -ENETUNREACH; 189 return -ENETUNREACH;
186 } 190 }
187 191
188 if (!inet->opt || !inet->opt->srr) 192 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = rt->rt_dst; 193 daddr = fl4.daddr;
190 194
191 if (!inet->inet_saddr) 195 if (!inet->inet_saddr)
192 inet->inet_saddr = rt->rt_src; 196 inet->inet_saddr = fl4.saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr; 197 inet->inet_rcv_saddr = inet->inet_saddr;
194 198
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { 199 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -200,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
200 } 204 }
201 205
202 if (tcp_death_row.sysctl_tw_recycle && 206 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { 207 !tp->rx_opt.ts_recent_stamp && fl4.daddr == daddr) {
204 struct inet_peer *peer = rt_get_peer(rt); 208 struct inet_peer *peer = rt_get_peer(rt);
205 /* 209 /*
206 * VJ's idea. We save last timestamp seen from 210 * VJ's idea. We save last timestamp seen from
@@ -221,8 +225,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
221 inet->inet_daddr = daddr; 225 inet->inet_daddr = daddr;
222 226
223 inet_csk(sk)->icsk_ext_hdr_len = 0; 227 inet_csk(sk)->icsk_ext_hdr_len = 0;
224 if (inet->opt) 228 if (inet_opt)
225 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 229 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
226 230
227 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; 231 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
228 232
@@ -236,8 +240,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 if (err) 240 if (err)
237 goto failure; 241 goto failure;
238 242
239 rt = ip_route_newports(rt, IPPROTO_TCP, 243 rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
240 orig_sport, orig_dport,
241 inet->inet_sport, inet->inet_dport, sk); 244 inet->inet_sport, inet->inet_dport, sk);
242 if (IS_ERR(rt)) { 245 if (IS_ERR(rt)) {
243 err = PTR_ERR(rt); 246 err = PTR_ERR(rt);
@@ -279,7 +282,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
279/* 282/*
280 * This routine does path mtu discovery as defined in RFC1191. 283 * This routine does path mtu discovery as defined in RFC1191.
281 */ 284 */
282static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) 285static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
283{ 286{
284 struct dst_entry *dst; 287 struct dst_entry *dst;
285 struct inet_sock *inet = inet_sk(sk); 288 struct inet_sock *inet = inet_sk(sk);
@@ -341,7 +344,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
341 344
342void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) 345void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
343{ 346{
344 struct iphdr *iph = (struct iphdr *)icmp_skb->data; 347 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
345 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); 348 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
346 struct inet_connection_sock *icsk; 349 struct inet_connection_sock *icsk;
347 struct tcp_sock *tp; 350 struct tcp_sock *tp;
@@ -820,17 +823,18 @@ static void syn_flood_warning(const struct sk_buff *skb)
820/* 823/*
821 * Save and compile IPv4 options into the request_sock if needed. 824 * Save and compile IPv4 options into the request_sock if needed.
822 */ 825 */
823static struct ip_options *tcp_v4_save_options(struct sock *sk, 826static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
824 struct sk_buff *skb) 827 struct sk_buff *skb)
825{ 828{
826 struct ip_options *opt = &(IPCB(skb)->opt); 829 const struct ip_options *opt = &(IPCB(skb)->opt);
827 struct ip_options *dopt = NULL; 830 struct ip_options_rcu *dopt = NULL;
828 831
829 if (opt && opt->optlen) { 832 if (opt && opt->optlen) {
830 int opt_size = optlength(opt); 833 int opt_size = sizeof(*dopt) + opt->optlen;
834
831 dopt = kmalloc(opt_size, GFP_ATOMIC); 835 dopt = kmalloc(opt_size, GFP_ATOMIC);
832 if (dopt) { 836 if (dopt) {
833 if (ip_options_echo(dopt, skb)) { 837 if (ip_options_echo(&dopt->opt, skb)) {
834 kfree(dopt); 838 kfree(dopt);
835 dopt = NULL; 839 dopt = NULL;
836 } 840 }
@@ -1411,6 +1415,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1411#ifdef CONFIG_TCP_MD5SIG 1415#ifdef CONFIG_TCP_MD5SIG
1412 struct tcp_md5sig_key *key; 1416 struct tcp_md5sig_key *key;
1413#endif 1417#endif
1418 struct ip_options_rcu *inet_opt;
1414 1419
1415 if (sk_acceptq_is_full(sk)) 1420 if (sk_acceptq_is_full(sk))
1416 goto exit_overflow; 1421 goto exit_overflow;
@@ -1431,13 +1436,14 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1431 newinet->inet_daddr = ireq->rmt_addr; 1436 newinet->inet_daddr = ireq->rmt_addr;
1432 newinet->inet_rcv_saddr = ireq->loc_addr; 1437 newinet->inet_rcv_saddr = ireq->loc_addr;
1433 newinet->inet_saddr = ireq->loc_addr; 1438 newinet->inet_saddr = ireq->loc_addr;
1434 newinet->opt = ireq->opt; 1439 inet_opt = ireq->opt;
1440 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1435 ireq->opt = NULL; 1441 ireq->opt = NULL;
1436 newinet->mc_index = inet_iif(skb); 1442 newinet->mc_index = inet_iif(skb);
1437 newinet->mc_ttl = ip_hdr(skb)->ttl; 1443 newinet->mc_ttl = ip_hdr(skb)->ttl;
1438 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1444 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1439 if (newinet->opt) 1445 if (inet_opt)
1440 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; 1446 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1441 newinet->inet_id = newtp->write_seq ^ jiffies; 1447 newinet->inet_id = newtp->write_seq ^ jiffies;
1442 1448
1443 tcp_mtup_init(newsk); 1449 tcp_mtup_init(newsk);
@@ -2527,7 +2533,7 @@ void tcp4_proc_exit(void)
2527 2533
2528struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2534struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2529{ 2535{
2530 struct iphdr *iph = skb_gro_network_header(skb); 2536 const struct iphdr *iph = skb_gro_network_header(skb);
2531 2537
2532 switch (skb->ip_summed) { 2538 switch (skb->ip_summed) {
2533 case CHECKSUM_COMPLETE: 2539 case CHECKSUM_COMPLETE:
@@ -2548,7 +2554,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2548 2554
2549int tcp4_gro_complete(struct sk_buff *skb) 2555int tcp4_gro_complete(struct sk_buff *skb)
2550{ 2556{
2551 struct iphdr *iph = ip_hdr(skb); 2557 const struct iphdr *iph = ip_hdr(skb);
2552 struct tcphdr *th = tcp_hdr(skb); 2558 struct tcphdr *th = tcp_hdr(skb);
2553 2559
2554 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 2560 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f87a8eb76f3b..544f435d1aff 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -578,7 +578,7 @@ found:
578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) 578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
579{ 579{
580 struct inet_sock *inet; 580 struct inet_sock *inet;
581 struct iphdr *iph = (struct iphdr *)skb->data; 581 const struct iphdr *iph = (const struct iphdr *)skb->data;
582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); 582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
583 const int type = icmp_hdr(skb)->type; 583 const int type = icmp_hdr(skb)->type;
584 const int code = icmp_hdr(skb)->code; 584 const int code = icmp_hdr(skb)->code;
@@ -804,6 +804,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
806 struct sk_buff *skb; 806 struct sk_buff *skb;
807 struct ip_options_data opt_copy;
807 808
808 if (len > 0xFFFF) 809 if (len > 0xFFFF)
809 return -EMSGSIZE; 810 return -EMSGSIZE;
@@ -877,22 +878,32 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
877 free = 1; 878 free = 1;
878 connected = 0; 879 connected = 0;
879 } 880 }
880 if (!ipc.opt) 881 if (!ipc.opt) {
881 ipc.opt = inet->opt; 882 struct ip_options_rcu *inet_opt;
883
884 rcu_read_lock();
885 inet_opt = rcu_dereference(inet->inet_opt);
886 if (inet_opt) {
887 memcpy(&opt_copy, inet_opt,
888 sizeof(*inet_opt) + inet_opt->opt.optlen);
889 ipc.opt = &opt_copy.opt;
890 }
891 rcu_read_unlock();
892 }
882 893
883 saddr = ipc.addr; 894 saddr = ipc.addr;
884 ipc.addr = faddr = daddr; 895 ipc.addr = faddr = daddr;
885 896
886 if (ipc.opt && ipc.opt->srr) { 897 if (ipc.opt && ipc.opt->opt.srr) {
887 if (!daddr) 898 if (!daddr)
888 return -EINVAL; 899 return -EINVAL;
889 faddr = ipc.opt->faddr; 900 faddr = ipc.opt->opt.faddr;
890 connected = 0; 901 connected = 0;
891 } 902 }
892 tos = RT_TOS(inet->tos); 903 tos = RT_TOS(inet->tos);
893 if (sock_flag(sk, SOCK_LOCALROUTE) || 904 if (sock_flag(sk, SOCK_LOCALROUTE) ||
894 (msg->msg_flags & MSG_DONTROUTE) || 905 (msg->msg_flags & MSG_DONTROUTE) ||
895 (ipc.opt && ipc.opt->is_strictroute)) { 906 (ipc.opt && ipc.opt->opt.is_strictroute)) {
896 tos |= RTO_ONLINK; 907 tos |= RTO_ONLINK;
897 connected = 0; 908 connected = 0;
898 } 909 }
@@ -909,20 +920,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
909 rt = (struct rtable *)sk_dst_check(sk, 0); 920 rt = (struct rtable *)sk_dst_check(sk, 0);
910 921
911 if (rt == NULL) { 922 if (rt == NULL) {
912 struct flowi4 fl4 = { 923 struct flowi4 fl4;
913 .flowi4_oif = ipc.oif,
914 .flowi4_mark = sk->sk_mark,
915 .daddr = faddr,
916 .saddr = saddr,
917 .flowi4_tos = tos,
918 .flowi4_proto = sk->sk_protocol,
919 .flowi4_flags = (inet_sk_flowi_flags(sk) |
920 FLOWI_FLAG_CAN_SLEEP),
921 .fl4_sport = inet->inet_sport,
922 .fl4_dport = dport,
923 };
924 struct net *net = sock_net(sk); 924 struct net *net = sock_net(sk);
925 925
926 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
927 RT_SCOPE_UNIVERSE, sk->sk_protocol,
928 inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
929 faddr, saddr, dport, inet->inet_sport);
930
926 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 931 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
927 rt = ip_route_output_flow(net, &fl4, sk); 932 rt = ip_route_output_flow(net, &fl4, sk);
928 if (IS_ERR(rt)) { 933 if (IS_ERR(rt)) {
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index d20a05e970d8..7ff973bd02dd 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -73,7 +73,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
73 73
74 rt->rt_key_dst = fl4->daddr; 74 rt->rt_key_dst = fl4->daddr;
75 rt->rt_key_src = fl4->saddr; 75 rt->rt_key_src = fl4->saddr;
76 rt->rt_tos = fl4->flowi4_tos; 76 rt->rt_key_tos = fl4->flowi4_tos;
77 rt->rt_route_iif = fl4->flowi4_iif; 77 rt->rt_route_iif = fl4->flowi4_iif;
78 rt->rt_iif = fl4->flowi4_iif; 78 rt->rt_iif = fl4->flowi4_iif;
79 rt->rt_oif = fl4->flowi4_oif; 79 rt->rt_oif = fl4->flowi4_oif;
@@ -102,7 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
102static void 102static void
103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) 103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104{ 104{
105 struct iphdr *iph = ip_hdr(skb); 105 const struct iphdr *iph = ip_hdr(skb);
106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
107 struct flowi4 *fl4 = &fl->u.ip4; 107 struct flowi4 *fl4 = &fl->u.ip4;
108 108
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1717c64628d1..ea983ae96ae6 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -55,7 +55,7 @@ xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
55 55
56int xfrm4_extract_header(struct sk_buff *skb) 56int xfrm4_extract_header(struct sk_buff *skb)
57{ 57{
58 struct iphdr *iph = ip_hdr(skb); 58 const struct iphdr *iph = ip_hdr(skb);
59 59
60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); 60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
61 XFRM_MODE_SKB_CB(skb)->id = iph->id; 61 XFRM_MODE_SKB_CB(skb)->id = iph->id;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a7bda0757053..f2f9b2e3cfe9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -825,6 +825,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
825 dst_release(&rt->dst); 825 dst_release(&rt->dst);
826 } 826 }
827 827
828 /* clean up prefsrc entries */
829 rt6_remove_prefsrc(ifp);
828out: 830out:
829 in6_ifa_put(ifp); 831 in6_ifa_put(ifp);
830} 832}
@@ -1281,7 +1283,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1281 return cnt; 1283 return cnt;
1282} 1284}
1283 1285
1284int ipv6_chk_addr(struct net *net, struct in6_addr *addr, 1286int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1285 struct net_device *dev, int strict) 1287 struct net_device *dev, int strict)
1286{ 1288{
1287 struct inet6_ifaddr *ifp; 1289 struct inet6_ifaddr *ifp;
@@ -1324,7 +1326,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1324 return false; 1326 return false;
1325} 1327}
1326 1328
1327int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) 1329int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1328{ 1330{
1329 struct inet6_dev *idev; 1331 struct inet6_dev *idev;
1330 struct inet6_ifaddr *ifa; 1332 struct inet6_ifaddr *ifa;
@@ -1455,7 +1457,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1455 1457
1456/* Join to solicited addr multicast group. */ 1458/* Join to solicited addr multicast group. */
1457 1459
1458void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr) 1460void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
1459{ 1461{
1460 struct in6_addr maddr; 1462 struct in6_addr maddr;
1461 1463
@@ -1466,7 +1468,7 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
1466 ipv6_dev_mc_inc(dev, &maddr); 1468 ipv6_dev_mc_inc(dev, &maddr);
1467} 1469}
1468 1470
1469void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr) 1471void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1470{ 1472{
1471 struct in6_addr maddr; 1473 struct in6_addr maddr;
1472 1474
@@ -2111,7 +2113,7 @@ err_exit:
2111/* 2113/*
2112 * Manual configuration of address on an interface 2114 * Manual configuration of address on an interface
2113 */ 2115 */
2114static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, 2116static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
2115 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, 2117 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2116 __u32 valid_lft) 2118 __u32 valid_lft)
2117{ 2119{
@@ -2185,7 +2187,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2185 return PTR_ERR(ifp); 2187 return PTR_ERR(ifp);
2186} 2188}
2187 2189
2188static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, 2190static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
2189 unsigned int plen) 2191 unsigned int plen)
2190{ 2192{
2191 struct inet6_ifaddr *ifp; 2193 struct inet6_ifaddr *ifp;
@@ -2348,7 +2350,7 @@ static void init_loopback(struct net_device *dev)
2348 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2350 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2349} 2351}
2350 2352
2351static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) 2353static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2352{ 2354{
2353 struct inet6_ifaddr * ifp; 2355 struct inet6_ifaddr * ifp;
2354 u32 addr_flags = IFA_F_PERMANENT; 2356 u32 addr_flags = IFA_F_PERMANENT;
@@ -3119,7 +3121,7 @@ void if6_proc_exit(void)
3119 3121
3120#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 3122#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3121/* Check if address is a home address configured on any interface. */ 3123/* Check if address is a home address configured on any interface. */
3122int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) 3124int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3123{ 3125{
3124 int ret = 0; 3126 int ret = 0;
3125 struct inet6_ifaddr *ifp = NULL; 3127 struct inet6_ifaddr *ifp = NULL;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index afcc7099f96d..b7919f901fbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -740,7 +740,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
740 740
741static int ipv6_gso_send_check(struct sk_buff *skb) 741static int ipv6_gso_send_check(struct sk_buff *skb)
742{ 742{
743 struct ipv6hdr *ipv6h; 743 const struct ipv6hdr *ipv6h;
744 const struct inet6_protocol *ops; 744 const struct inet6_protocol *ops;
745 int err = -EINVAL; 745 int err = -EINVAL;
746 746
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0e5e943446f0..674255f5e6b7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,7 +44,7 @@
44 44
45#include <net/checksum.h> 45#include <net/checksum.h>
46 46
47static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr); 47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
48 48
49/* Big ac list lock for all the sockets */ 49/* Big ac list lock for all the sockets */
50static DEFINE_RWLOCK(ipv6_sk_ac_lock); 50static DEFINE_RWLOCK(ipv6_sk_ac_lock);
@@ -54,7 +54,7 @@ static DEFINE_RWLOCK(ipv6_sk_ac_lock);
54 * socket join an anycast group 54 * socket join an anycast group
55 */ 55 */
56 56
57int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) 57int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
58{ 58{
59 struct ipv6_pinfo *np = inet6_sk(sk); 59 struct ipv6_pinfo *np = inet6_sk(sk);
60 struct net_device *dev = NULL; 60 struct net_device *dev = NULL;
@@ -145,7 +145,7 @@ error:
145/* 145/*
146 * socket leave an anycast group 146 * socket leave an anycast group
147 */ 147 */
148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) 148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
149{ 149{
150 struct ipv6_pinfo *np = inet6_sk(sk); 150 struct ipv6_pinfo *np = inet6_sk(sk);
151 struct net_device *dev; 151 struct net_device *dev;
@@ -252,7 +252,7 @@ static void aca_put(struct ifacaddr6 *ac)
252/* 252/*
253 * device anycast group inc (add if not found) 253 * device anycast group inc (add if not found)
254 */ 254 */
255int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr) 255int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
256{ 256{
257 struct ifacaddr6 *aca; 257 struct ifacaddr6 *aca;
258 struct inet6_dev *idev; 258 struct inet6_dev *idev;
@@ -324,7 +324,7 @@ out:
324/* 324/*
325 * device anycast group decrement 325 * device anycast group decrement
326 */ 326 */
327int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) 327int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
328{ 328{
329 struct ifacaddr6 *aca, *prev_aca; 329 struct ifacaddr6 *aca, *prev_aca;
330 330
@@ -358,7 +358,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
358} 358}
359 359
360/* called with rcu_read_lock() */ 360/* called with rcu_read_lock() */
361static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) 361static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
362{ 362{
363 struct inet6_dev *idev = __in6_dev_get(dev); 363 struct inet6_dev *idev = __in6_dev_get(dev);
364 364
@@ -371,7 +371,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
371 * check if the interface has this anycast address 371 * check if the interface has this anycast address
372 * called with rcu_read_lock() 372 * called with rcu_read_lock()
373 */ 373 */
374static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) 374static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
375{ 375{
376 struct inet6_dev *idev; 376 struct inet6_dev *idev;
377 struct ifacaddr6 *aca; 377 struct ifacaddr6 *aca;
@@ -392,7 +392,7 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
392 * check if given interface (or any, if dev==0) has this anycast address 392 * check if given interface (or any, if dev==0) has this anycast address
393 */ 393 */
394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
395 struct in6_addr *addr) 395 const struct in6_addr *addr)
396{ 396{
397 int found = 0; 397 int found = 0;
398 398
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 59dccfbb5b11..1ac7938dd9ec 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -430,7 +430,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 u8 type, u8 code, int offset, __be32 info) 430 u8 type, u8 code, int offset, __be32 info)
431{ 431{
432 struct net *net = dev_net(skb->dev); 432 struct net *net = dev_net(skb->dev);
433 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 433 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
435 struct xfrm_state *x; 435 struct xfrm_state *x;
436 436
@@ -438,7 +438,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
438 type != ICMPV6_PKT_TOOBIG) 438 type != ICMPV6_PKT_TOOBIG)
439 return; 439 return;
440 440
441 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 441 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
442 esph->spi, IPPROTO_ESP, AF_INET6);
442 if (!x) 443 if (!x)
443 return; 444 return;
444 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 445 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 83cb4f9add81..11900417b1cc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -372,7 +372,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
372 struct ipv6hdr *hdr = ipv6_hdr(skb); 372 struct ipv6hdr *hdr = ipv6_hdr(skb);
373 struct sock *sk; 373 struct sock *sk;
374 struct ipv6_pinfo *np; 374 struct ipv6_pinfo *np;
375 struct in6_addr *saddr = NULL; 375 const struct in6_addr *saddr = NULL;
376 struct dst_entry *dst; 376 struct dst_entry *dst;
377 struct icmp6hdr tmp_hdr; 377 struct icmp6hdr tmp_hdr;
378 struct flowi6 fl6; 378 struct flowi6 fl6;
@@ -521,7 +521,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
521 struct sock *sk; 521 struct sock *sk;
522 struct inet6_dev *idev; 522 struct inet6_dev *idev;
523 struct ipv6_pinfo *np; 523 struct ipv6_pinfo *np;
524 struct in6_addr *saddr = NULL; 524 const struct in6_addr *saddr = NULL;
525 struct icmp6hdr *icmph = icmp6_hdr(skb); 525 struct icmp6hdr *icmph = icmp6_hdr(skb);
526 struct icmp6hdr tmp_hdr; 526 struct icmp6hdr tmp_hdr;
527 struct flowi6 fl6; 527 struct flowi6 fl6;
@@ -645,8 +645,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
645{ 645{
646 struct net_device *dev = skb->dev; 646 struct net_device *dev = skb->dev;
647 struct inet6_dev *idev = __in6_dev_get(dev); 647 struct inet6_dev *idev = __in6_dev_get(dev);
648 struct in6_addr *saddr, *daddr; 648 const struct in6_addr *saddr, *daddr;
649 struct ipv6hdr *orig_hdr; 649 const struct ipv6hdr *orig_hdr;
650 struct icmp6hdr *hdr; 650 struct icmp6hdr *hdr;
651 u8 type; 651 u8 type;
652 652
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7548905e79e1..4076a0b14b20 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -134,9 +134,9 @@ static __inline__ u32 fib6_new_sernum(void)
134# define BITOP_BE32_SWIZZLE 0 134# define BITOP_BE32_SWIZZLE 0
135#endif 135#endif
136 136
137static __inline__ __be32 addr_bit_set(void *token, int fn_bit) 137static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
138{ 138{
139 __be32 *addr = token; 139 const __be32 *addr = token;
140 /* 140 /*
141 * Here, 141 * Here,
142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) 142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
@@ -394,10 +394,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
394 arg.net = net; 394 arg.net = net;
395 w->args = &arg; 395 w->args = &arg;
396 396
397 rcu_read_lock();
397 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { 398 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
398 e = 0; 399 e = 0;
399 head = &net->ipv6.fib_table_hash[h]; 400 head = &net->ipv6.fib_table_hash[h];
400 hlist_for_each_entry(tb, node, head, tb6_hlist) { 401 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
401 if (e < s_e) 402 if (e < s_e)
402 goto next; 403 goto next;
403 res = fib6_dump_table(tb, skb, cb); 404 res = fib6_dump_table(tb, skb, cb);
@@ -408,6 +409,7 @@ next:
408 } 409 }
409 } 410 }
410out: 411out:
412 rcu_read_unlock();
411 cb->args[1] = e; 413 cb->args[1] = e;
412 cb->args[0] = h; 414 cb->args[0] = h;
413 415
@@ -822,7 +824,7 @@ st_failure:
822 824
823struct lookup_args { 825struct lookup_args {
824 int offset; /* key offset on rt6_info */ 826 int offset; /* key offset on rt6_info */
825 struct in6_addr *addr; /* search key */ 827 const struct in6_addr *addr; /* search key */
826}; 828};
827 829
828static struct fib6_node * fib6_lookup_1(struct fib6_node *root, 830static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
@@ -881,8 +883,8 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
881 return NULL; 883 return NULL;
882} 884}
883 885
884struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr, 886struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
885 struct in6_addr *saddr) 887 const struct in6_addr *saddr)
886{ 888{
887 struct fib6_node *fn; 889 struct fib6_node *fn;
888 struct lookup_args args[] = { 890 struct lookup_args args[] = {
@@ -916,7 +918,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
916 918
917 919
918static struct fib6_node * fib6_locate_1(struct fib6_node *root, 920static struct fib6_node * fib6_locate_1(struct fib6_node *root,
919 struct in6_addr *addr, 921 const struct in6_addr *addr,
920 int plen, int offset) 922 int plen, int offset)
921{ 923{
922 struct fib6_node *fn; 924 struct fib6_node *fn;
@@ -946,8 +948,8 @@ static struct fib6_node * fib6_locate_1(struct fib6_node *root,
946} 948}
947 949
948struct fib6_node * fib6_locate(struct fib6_node *root, 950struct fib6_node * fib6_locate(struct fib6_node *root,
949 struct in6_addr *daddr, int dst_len, 951 const struct in6_addr *daddr, int dst_len,
950 struct in6_addr *saddr, int src_len) 952 const struct in6_addr *saddr, int src_len)
951{ 953{
952 struct fib6_node *fn; 954 struct fib6_node *fn;
953 955
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a83e9209cecc..027c7ff6f1e5 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -57,7 +57,7 @@ inline int ip6_rcv_finish( struct sk_buff *skb)
57 57
58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
59{ 59{
60 struct ipv6hdr *hdr; 60 const struct ipv6hdr *hdr;
61 u32 pkt_len; 61 u32 pkt_len;
62 struct inet6_dev *idev; 62 struct inet6_dev *idev;
63 struct net *net = dev_net(skb->dev); 63 struct net *net = dev_net(skb->dev);
@@ -186,7 +186,7 @@ resubmit:
186 int ret; 186 int ret;
187 187
188 if (ipprot->flags & INET6_PROTO_FINAL) { 188 if (ipprot->flags & INET6_PROTO_FINAL) {
189 struct ipv6hdr *hdr; 189 const struct ipv6hdr *hdr;
190 190
191 /* Free reference early: we don't need it any more, 191 /* Free reference early: we don't need it any more,
192 and it may hold ip_conntrack module loaded 192 and it may hold ip_conntrack module loaded
@@ -242,7 +242,7 @@ int ip6_input(struct sk_buff *skb)
242 242
243int ip6_mc_input(struct sk_buff *skb) 243int ip6_mc_input(struct sk_buff *skb)
244{ 244{
245 struct ipv6hdr *hdr; 245 const struct ipv6hdr *hdr;
246 int deliver; 246 int deliver;
247 247
248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), 248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 46cf7bea6769..4cfbb24b9e04 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -869,9 +869,9 @@ fail:
869 return err; 869 return err;
870} 870}
871 871
872static inline int ip6_rt_check(struct rt6key *rt_key, 872static inline int ip6_rt_check(const struct rt6key *rt_key,
873 struct in6_addr *fl_addr, 873 const struct in6_addr *fl_addr,
874 struct in6_addr *addr_cache) 874 const struct in6_addr *addr_cache)
875{ 875{
876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && 876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); 877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
@@ -879,7 +879,7 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
879 879
880static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 880static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
881 struct dst_entry *dst, 881 struct dst_entry *dst,
882 struct flowi6 *fl6) 882 const struct flowi6 *fl6)
883{ 883{
884 struct ipv6_pinfo *np = inet6_sk(sk); 884 struct ipv6_pinfo *np = inet6_sk(sk);
885 struct rt6_info *rt = (struct rt6_info *)dst; 885 struct rt6_info *rt = (struct rt6_info *)dst;
@@ -930,10 +930,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
930 goto out_err_release; 930 goto out_err_release;
931 931
932 if (ipv6_addr_any(&fl6->saddr)) { 932 if (ipv6_addr_any(&fl6->saddr)) {
933 err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, 933 struct rt6_info *rt = (struct rt6_info *) *dst;
934 &fl6->daddr, 934 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
935 sk ? inet6_sk(sk)->srcprefs : 0, 935 sk ? inet6_sk(sk)->srcprefs : 0,
936 &fl6->saddr); 936 &fl6->saddr);
937 if (err) 937 if (err)
938 goto out_err_release; 938 goto out_err_release;
939 } 939 }
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c1b1bd312df2..36c2842a86b2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -162,7 +162,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
163 163
164static struct ip6_tnl * 164static struct ip6_tnl *
165ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) 165ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
166{ 166{
167 unsigned int h0 = HASH(remote); 167 unsigned int h0 = HASH(remote);
168 unsigned int h1 = HASH(local); 168 unsigned int h1 = HASH(local);
@@ -194,10 +194,10 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
194 **/ 194 **/
195 195
196static struct ip6_tnl __rcu ** 196static struct ip6_tnl __rcu **
197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) 197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198{ 198{
199 struct in6_addr *remote = &p->raddr; 199 const struct in6_addr *remote = &p->raddr;
200 struct in6_addr *local = &p->laddr; 200 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0; 201 unsigned h = 0;
202 int prio = 0; 202 int prio = 0;
203 203
@@ -280,11 +280,6 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
280 280
281 dev_net_set(dev, net); 281 dev_net_set(dev, net);
282 282
283 if (strchr(name, '%')) {
284 if (dev_alloc_name(dev, name) < 0)
285 goto failed_free;
286 }
287
288 t = netdev_priv(dev); 283 t = netdev_priv(dev);
289 t->parms = *p; 284 t->parms = *p;
290 err = ip6_tnl_dev_init(dev); 285 err = ip6_tnl_dev_init(dev);
@@ -321,8 +316,8 @@ failed:
321static struct ip6_tnl *ip6_tnl_locate(struct net *net, 316static struct ip6_tnl *ip6_tnl_locate(struct net *net,
322 struct ip6_tnl_parm *p, int create) 317 struct ip6_tnl_parm *p, int create)
323{ 318{
324 struct in6_addr *remote = &p->raddr; 319 const struct in6_addr *remote = &p->raddr;
325 struct in6_addr *local = &p->laddr; 320 const struct in6_addr *local = &p->laddr;
326 struct ip6_tnl __rcu **tp; 321 struct ip6_tnl __rcu **tp;
327 struct ip6_tnl *t; 322 struct ip6_tnl *t;
328 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 323 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -374,7 +369,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
374static __u16 369static __u16
375parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) 370parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
376{ 371{
377 struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw; 372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
378 __u8 nexthdr = ipv6h->nexthdr; 373 __u8 nexthdr = ipv6h->nexthdr;
379 __u16 off = sizeof (*ipv6h); 374 __u16 off = sizeof (*ipv6h);
380 375
@@ -435,7 +430,7 @@ static int
435ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, 430ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
436 u8 *type, u8 *code, int *msg, __u32 *info, int offset) 431 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
437{ 432{
438 struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; 433 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
439 struct ip6_tnl *t; 434 struct ip6_tnl *t;
440 int rel_msg = 0; 435 int rel_msg = 0;
441 u8 rel_type = ICMPV6_DEST_UNREACH; 436 u8 rel_type = ICMPV6_DEST_UNREACH;
@@ -535,8 +530,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
535 __u32 rel_info = ntohl(info); 530 __u32 rel_info = ntohl(info);
536 int err; 531 int err;
537 struct sk_buff *skb2; 532 struct sk_buff *skb2;
538 struct iphdr *eiph; 533 const struct iphdr *eiph;
539 struct rtable *rt; 534 struct rtable *rt;
535 struct flowi4 fl4;
540 536
541 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, 537 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
542 &rel_msg, &rel_info, offset); 538 &rel_msg, &rel_info, offset);
@@ -577,7 +573,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
577 eiph = ip_hdr(skb2); 573 eiph = ip_hdr(skb2);
578 574
579 /* Try to guess incoming interface */ 575 /* Try to guess incoming interface */
580 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 576 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
581 eiph->saddr, 0, 577 eiph->saddr, 0,
582 0, 0, 578 0, 0,
583 IPPROTO_IPIP, RT_TOS(eiph->tos), 0); 579 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
@@ -590,7 +586,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
590 if (rt->rt_flags & RTCF_LOCAL) { 586 if (rt->rt_flags & RTCF_LOCAL) {
591 ip_rt_put(rt); 587 ip_rt_put(rt);
592 rt = NULL; 588 rt = NULL;
593 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 589 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
594 eiph->daddr, eiph->saddr, 590 eiph->daddr, eiph->saddr,
595 0, 0, 591 0, 0,
596 IPPROTO_IPIP, 592 IPPROTO_IPIP,
@@ -669,8 +665,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
669 return 0; 665 return 0;
670} 666}
671 667
672static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 668static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
673 struct ipv6hdr *ipv6h, 669 const struct ipv6hdr *ipv6h,
674 struct sk_buff *skb) 670 struct sk_buff *skb)
675{ 671{
676 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; 672 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
@@ -682,8 +678,8 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
682 IP_ECN_set_ce(ip_hdr(skb)); 678 IP_ECN_set_ce(ip_hdr(skb));
683} 679}
684 680
685static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 681static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
686 struct ipv6hdr *ipv6h, 682 const struct ipv6hdr *ipv6h,
687 struct sk_buff *skb) 683 struct sk_buff *skb)
688{ 684{
689 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 685 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
@@ -726,12 +722,12 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
726 722
727static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, 723static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 __u8 ipproto, 724 __u8 ipproto,
729 void (*dscp_ecn_decapsulate)(struct ip6_tnl *t, 725 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
730 struct ipv6hdr *ipv6h, 726 const struct ipv6hdr *ipv6h,
731 struct sk_buff *skb)) 727 struct sk_buff *skb))
732{ 728{
733 struct ip6_tnl *t; 729 struct ip6_tnl *t;
734 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 730 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
735 731
736 rcu_read_lock(); 732 rcu_read_lock();
737 733
@@ -828,7 +824,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
828 **/ 824 **/
829 825
830static inline int 826static inline int
831ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) 827ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
832{ 828{
833 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 829 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
834} 830}
@@ -1005,7 +1001,7 @@ static inline int
1005ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1001ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1006{ 1002{
1007 struct ip6_tnl *t = netdev_priv(dev); 1003 struct ip6_tnl *t = netdev_priv(dev);
1008 struct iphdr *iph = ip_hdr(skb); 1004 const struct iphdr *iph = ip_hdr(skb);
1009 int encap_limit = -1; 1005 int encap_limit = -1;
1010 struct flowi6 fl6; 1006 struct flowi6 fl6;
1011 __u8 dsfield; 1007 __u8 dsfield;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 29e48593bf22..82a809901f8e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -989,8 +989,8 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
989} 989}
990 990
991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, 991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
992 struct in6_addr *origin, 992 const struct in6_addr *origin,
993 struct in6_addr *mcastgrp) 993 const struct in6_addr *mcastgrp)
994{ 994{
995 int line = MFC6_HASH(mcastgrp, origin); 995 int line = MFC6_HASH(mcastgrp, origin);
996 struct mfc6_cache *c; 996 struct mfc6_cache *c;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 85cccd6ed0b7..bba658d9a03c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -55,7 +55,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
55{ 55{
56 struct net *net = dev_net(skb->dev); 56 struct net *net = dev_net(skb->dev);
57 __be32 spi; 57 __be32 spi;
58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
59 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
60 (struct ip_comp_hdr *)(skb->data + offset); 60 (struct ip_comp_hdr *)(skb->data + offset);
61 struct xfrm_state *x; 61 struct xfrm_state *x;
@@ -64,7 +64,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
64 return; 64 return;
65 65
66 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
68 spi, IPPROTO_COMP, AF_INET6);
68 if (!x) 69 if (!x)
69 return; 70 return;
70 71
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 76b893771e6e..ff62e33ead07 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -92,16 +92,16 @@ static void mld_gq_timer_expire(unsigned long data);
92static void mld_ifc_timer_expire(unsigned long data); 92static void mld_ifc_timer_expire(unsigned long data);
93static void mld_ifc_event(struct inet6_dev *idev); 93static void mld_ifc_event(struct inet6_dev *idev);
94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); 94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
95static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *addr); 95static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
96static void mld_clear_delrec(struct inet6_dev *idev); 96static void mld_clear_delrec(struct inet6_dev *idev);
97static int sf_setstate(struct ifmcaddr6 *pmc); 97static int sf_setstate(struct ifmcaddr6 *pmc);
98static void sf_markstate(struct ifmcaddr6 *pmc); 98static void sf_markstate(struct ifmcaddr6 *pmc);
99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); 99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
100static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 100static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
101 int sfmode, int sfcount, struct in6_addr *psfsrc, 101 int sfmode, int sfcount, const struct in6_addr *psfsrc,
102 int delta); 102 int delta);
103static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 103static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
104 int sfmode, int sfcount, struct in6_addr *psfsrc, 104 int sfmode, int sfcount, const struct in6_addr *psfsrc,
105 int delta); 105 int delta);
106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, 106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
107 struct inet6_dev *idev); 107 struct inet6_dev *idev);
@@ -250,7 +250,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
250 250
251/* called with rcu_read_lock() */ 251/* called with rcu_read_lock() */
252static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, 252static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
253 struct in6_addr *group, 253 const struct in6_addr *group,
254 int ifindex) 254 int ifindex)
255{ 255{
256 struct net_device *dev = NULL; 256 struct net_device *dev = NULL;
@@ -451,7 +451,7 @@ done:
451 451
452int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) 452int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
453{ 453{
454 struct in6_addr *group; 454 const struct in6_addr *group;
455 struct ipv6_mc_socklist *pmc; 455 struct ipv6_mc_socklist *pmc;
456 struct inet6_dev *idev; 456 struct inet6_dev *idev;
457 struct ipv6_pinfo *inet6 = inet6_sk(sk); 457 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -542,7 +542,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
542 struct group_filter __user *optval, int __user *optlen) 542 struct group_filter __user *optval, int __user *optlen)
543{ 543{
544 int err, i, count, copycount; 544 int err, i, count, copycount;
545 struct in6_addr *group; 545 const struct in6_addr *group;
546 struct ipv6_mc_socklist *pmc; 546 struct ipv6_mc_socklist *pmc;
547 struct inet6_dev *idev; 547 struct inet6_dev *idev;
548 struct ipv6_pinfo *inet6 = inet6_sk(sk); 548 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -752,7 +752,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
752 spin_unlock_bh(&idev->mc_lock); 752 spin_unlock_bh(&idev->mc_lock);
753} 753}
754 754
755static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 755static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
756{ 756{
757 struct ifmcaddr6 *pmc, *pmc_prev; 757 struct ifmcaddr6 *pmc, *pmc_prev;
758 struct ip6_sf_list *psf, *psf_next; 758 struct ip6_sf_list *psf, *psf_next;
@@ -1052,7 +1052,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1052 1052
1053/* mark EXCLUDE-mode sources */ 1053/* mark EXCLUDE-mode sources */
1054static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, 1054static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1055 struct in6_addr *srcs) 1055 const struct in6_addr *srcs)
1056{ 1056{
1057 struct ip6_sf_list *psf; 1057 struct ip6_sf_list *psf;
1058 int i, scount; 1058 int i, scount;
@@ -1080,7 +1080,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1080} 1080}
1081 1081
1082static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, 1082static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1083 struct in6_addr *srcs) 1083 const struct in6_addr *srcs)
1084{ 1084{
1085 struct ip6_sf_list *psf; 1085 struct ip6_sf_list *psf;
1086 int i, scount; 1086 int i, scount;
@@ -1115,7 +1115,7 @@ int igmp6_event_query(struct sk_buff *skb)
1115{ 1115{
1116 struct mld2_query *mlh2 = NULL; 1116 struct mld2_query *mlh2 = NULL;
1117 struct ifmcaddr6 *ma; 1117 struct ifmcaddr6 *ma;
1118 struct in6_addr *group; 1118 const struct in6_addr *group;
1119 unsigned long max_delay; 1119 unsigned long max_delay;
1120 struct inet6_dev *idev; 1120 struct inet6_dev *idev;
1121 struct mld_msg *mld; 1121 struct mld_msg *mld;
@@ -1821,7 +1821,7 @@ err_out:
1821} 1821}
1822 1822
1823static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 1823static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1824 struct in6_addr *psfsrc) 1824 const struct in6_addr *psfsrc)
1825{ 1825{
1826 struct ip6_sf_list *psf, *psf_prev; 1826 struct ip6_sf_list *psf, *psf_prev;
1827 int rv = 0; 1827 int rv = 0;
@@ -1857,8 +1857,8 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1857 return rv; 1857 return rv;
1858} 1858}
1859 1859
1860static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 1860static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
1861 int sfmode, int sfcount, struct in6_addr *psfsrc, 1861 int sfmode, int sfcount, const struct in6_addr *psfsrc,
1862 int delta) 1862 int delta)
1863{ 1863{
1864 struct ifmcaddr6 *pmc; 1864 struct ifmcaddr6 *pmc;
@@ -1918,7 +1918,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
1918 * Add multicast single-source filter to the interface list 1918 * Add multicast single-source filter to the interface list
1919 */ 1919 */
1920static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, 1920static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
1921 struct in6_addr *psfsrc, int delta) 1921 const struct in6_addr *psfsrc, int delta)
1922{ 1922{
1923 struct ip6_sf_list *psf, *psf_prev; 1923 struct ip6_sf_list *psf, *psf_prev;
1924 1924
@@ -2021,8 +2021,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
2021/* 2021/*
2022 * Add multicast source filter list to the interface list 2022 * Add multicast source filter list to the interface list
2023 */ 2023 */
2024static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 2024static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2025 int sfmode, int sfcount, struct in6_addr *psfsrc, 2025 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2026 int delta) 2026 int delta)
2027{ 2027{
2028 struct ifmcaddr6 *pmc; 2028 struct ifmcaddr6 *pmc;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 9b210482fb05..43242e6e6103 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -126,7 +126,7 @@ static struct mip6_report_rate_limiter mip6_report_rl = {
126 126
127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb) 127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
128{ 128{
129 struct ipv6hdr *iph = ipv6_hdr(skb); 129 const struct ipv6hdr *iph = ipv6_hdr(skb);
130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data; 130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
131 int err = destopt->nexthdr; 131 int err = destopt->nexthdr;
132 132
@@ -181,8 +181,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
181} 181}
182 182
183static inline int mip6_report_rl_allow(struct timeval *stamp, 183static inline int mip6_report_rl_allow(struct timeval *stamp,
184 struct in6_addr *dst, 184 const struct in6_addr *dst,
185 struct in6_addr *src, int iif) 185 const struct in6_addr *src, int iif)
186{ 186{
187 int allow = 0; 187 int allow = 0;
188 188
@@ -349,7 +349,7 @@ static const struct xfrm_type mip6_destopt_type =
349 349
350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb) 350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
351{ 351{
352 struct ipv6hdr *iph = ipv6_hdr(skb); 352 const struct ipv6hdr *iph = ipv6_hdr(skb);
353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data; 353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
354 int err = rt2->rt_hdr.nexthdr; 354 int err = rt2->rt_hdr.nexthdr;
355 355
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 92f952d093db..7596f071d308 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -324,7 +324,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
324 return lladdr + prepad; 324 return lladdr + prepad;
325} 325}
326 326
327int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir) 327int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
328{ 328{
329 switch (dev->type) { 329 switch (dev->type) {
330 case ARPHRD_ETHER: 330 case ARPHRD_ETHER:
@@ -611,6 +611,29 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); 611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
612} 612}
613 613
614static void ndisc_send_unsol_na(struct net_device *dev)
615{
616 struct inet6_dev *idev;
617 struct inet6_ifaddr *ifa;
618 struct in6_addr mcaddr;
619
620 idev = in6_dev_get(dev);
621 if (!idev)
622 return;
623
624 read_lock_bh(&idev->lock);
625 list_for_each_entry(ifa, &idev->addr_list, if_list) {
626 addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
627 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
628 /*router=*/ !!idev->cnf.forwarding,
629 /*solicited=*/ false, /*override=*/ true,
630 /*inc_opt=*/ true);
631 }
632 read_unlock_bh(&idev->lock);
633
634 in6_dev_put(idev);
635}
636
614void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 637void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
615 const struct in6_addr *solicit, 638 const struct in6_addr *solicit,
616 const struct in6_addr *daddr, const struct in6_addr *saddr) 639 const struct in6_addr *daddr, const struct in6_addr *saddr)
@@ -725,8 +748,8 @@ static int pndisc_is_router(const void *pkey,
725static void ndisc_recv_ns(struct sk_buff *skb) 748static void ndisc_recv_ns(struct sk_buff *skb)
726{ 749{
727 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 750 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
728 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 751 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
729 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 752 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
730 u8 *lladdr = NULL; 753 u8 *lladdr = NULL;
731 u32 ndoptlen = skb->tail - (skb->transport_header + 754 u32 ndoptlen = skb->tail - (skb->transport_header +
732 offsetof(struct nd_msg, opt)); 755 offsetof(struct nd_msg, opt));
@@ -901,8 +924,8 @@ out:
901static void ndisc_recv_na(struct sk_buff *skb) 924static void ndisc_recv_na(struct sk_buff *skb)
902{ 925{
903 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 926 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
904 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 927 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
905 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 928 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
906 u8 *lladdr = NULL; 929 u8 *lladdr = NULL;
907 u32 ndoptlen = skb->tail - (skb->transport_header + 930 u32 ndoptlen = skb->tail - (skb->transport_header +
908 offsetof(struct nd_msg, opt)); 931 offsetof(struct nd_msg, opt));
@@ -945,9 +968,10 @@ static void ndisc_recv_na(struct sk_buff *skb)
945 } 968 }
946 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); 969 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
947 if (ifp) { 970 if (ifp) {
948 if (ifp->flags & IFA_F_TENTATIVE) { 971 if (skb->pkt_type != PACKET_LOOPBACK
949 addrconf_dad_failure(ifp); 972 && (ifp->flags & IFA_F_TENTATIVE)) {
950 return; 973 addrconf_dad_failure(ifp);
974 return;
951 } 975 }
952 /* What should we make now? The advertisement 976 /* What should we make now? The advertisement
953 is invalid, but ndisc specs say nothing 977 is invalid, but ndisc specs say nothing
@@ -1014,7 +1038,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1014 unsigned long ndoptlen = skb->len - sizeof(*rs_msg); 1038 unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
1015 struct neighbour *neigh; 1039 struct neighbour *neigh;
1016 struct inet6_dev *idev; 1040 struct inet6_dev *idev;
1017 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 1041 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
1018 struct ndisc_options ndopts; 1042 struct ndisc_options ndopts;
1019 u8 *lladdr = NULL; 1043 u8 *lladdr = NULL;
1020 1044
@@ -1411,8 +1435,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1411{ 1435{
1412 struct inet6_dev *in6_dev; 1436 struct inet6_dev *in6_dev;
1413 struct icmp6hdr *icmph; 1437 struct icmp6hdr *icmph;
1414 struct in6_addr *dest; 1438 const struct in6_addr *dest;
1415 struct in6_addr *target; /* new first hop to destination */ 1439 const struct in6_addr *target; /* new first hop to destination */
1416 struct neighbour *neigh; 1440 struct neighbour *neigh;
1417 int on_link = 0; 1441 int on_link = 0;
1418 struct ndisc_options ndopts; 1442 struct ndisc_options ndopts;
@@ -1445,7 +1469,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1445 } 1469 }
1446 1470
1447 icmph = icmp6_hdr(skb); 1471 icmph = icmp6_hdr(skb);
1448 target = (struct in6_addr *) (icmph + 1); 1472 target = (const struct in6_addr *) (icmph + 1);
1449 dest = target + 1; 1473 dest = target + 1;
1450 1474
1451 if (ipv6_addr_is_multicast(dest)) { 1475 if (ipv6_addr_is_multicast(dest)) {
@@ -1722,6 +1746,9 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1722 neigh_ifdown(&nd_tbl, dev); 1746 neigh_ifdown(&nd_tbl, dev);
1723 fib6_run_gc(~0UL, net); 1747 fib6_run_gc(~0UL, net);
1724 break; 1748 break;
1749 case NETDEV_NOTIFY_PEERS:
1750 ndisc_send_unsol_na(dev);
1751 break;
1725 default: 1752 default:
1726 break; 1753 break;
1727 } 1754 }
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 28bc1f644b7b..30fcee465448 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -13,7 +13,7 @@
13int ip6_route_me_harder(struct sk_buff *skb) 13int ip6_route_me_harder(struct sk_buff *skb)
14{ 14{
15 struct net *net = dev_net(skb_dst(skb)->dev); 15 struct net *net = dev_net(skb_dst(skb)->dev);
16 struct ipv6hdr *iph = ipv6_hdr(skb); 16 const struct ipv6hdr *iph = ipv6_hdr(skb);
17 struct dst_entry *dst; 17 struct dst_entry *dst;
18 struct flowi6 fl6 = { 18 struct flowi6 fl6 = {
19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -67,7 +67,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
68 68
69 if (entry->hook == NF_INET_LOCAL_OUT) { 69 if (entry->hook == NF_INET_LOCAL_OUT) {
70 struct ipv6hdr *iph = ipv6_hdr(skb); 70 const struct ipv6hdr *iph = ipv6_hdr(skb);
71 71
72 rt_info->daddr = iph->daddr; 72 rt_info->daddr = iph->daddr;
73 rt_info->saddr = iph->saddr; 73 rt_info->saddr = iph->saddr;
@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
82 82
83 if (entry->hook == NF_INET_LOCAL_OUT) { 83 if (entry->hook == NF_INET_LOCAL_OUT) {
84 struct ipv6hdr *iph = ipv6_hdr(skb); 84 const struct ipv6hdr *iph = ipv6_hdr(skb);
85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || 86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
87 skb->mark != rt_info->mark) 87 skb->mark != rt_info->mark)
@@ -108,7 +108,7 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
109 unsigned int dataoff, u_int8_t protocol) 109 unsigned int dataoff, u_int8_t protocol)
110{ 110{
111 struct ipv6hdr *ip6h = ipv6_hdr(skb); 111 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
112 __sum16 csum = 0; 112 __sum16 csum = 0;
113 113
114 switch (skb->ip_summed) { 114 switch (skb->ip_summed) {
@@ -142,7 +142,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
142 unsigned int dataoff, unsigned int len, 142 unsigned int dataoff, unsigned int len,
143 u_int8_t protocol) 143 u_int8_t protocol)
144{ 144{
145 struct ipv6hdr *ip6h = ipv6_hdr(skb); 145 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
146 __wsum hsum; 146 __wsum hsum;
147 __sum16 csum = 0; 147 __sum16 csum = 0;
148 148
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5a1c6f27ffaf..94874b0bdcdc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
340 unsigned int *stackptr, origptr, cpu; 340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private; 341 const struct xt_table_info *private;
342 struct xt_action_param acpar; 342 struct xt_action_param acpar;
343 unsigned int addend;
343 344
344 /* Initialization */ 345 /* Initialization */
345 indev = in ? in->name : nulldevname; 346 indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
358 359
359 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 360 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
360 361
361 xt_info_rdlock_bh(); 362 local_bh_disable();
363 addend = xt_write_recseq_begin();
362 private = table->private; 364 private = table->private;
363 cpu = smp_processor_id(); 365 cpu = smp_processor_id();
364 table_base = private->entries[cpu]; 366 table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
442 } while (!acpar.hotdrop); 444 } while (!acpar.hotdrop);
443 445
444 *stackptr = origptr; 446 *stackptr = origptr;
445 xt_info_rdunlock_bh(); 447
448 xt_write_recseq_end(addend);
449 local_bh_enable();
446 450
447#ifdef DEBUG_ALLOW_ALL 451#ifdef DEBUG_ALLOW_ALL
448 return NF_ACCEPT; 452 return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
899 unsigned int i; 903 unsigned int i;
900 904
901 for_each_possible_cpu(cpu) { 905 for_each_possible_cpu(cpu) {
902 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 906 seqcount_t *s = &per_cpu(xt_recseq, cpu);
903 907
904 i = 0; 908 i = 0;
905 xt_entry_foreach(iter, t->entries[cpu], t->size) { 909 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
907 unsigned int start; 911 unsigned int start;
908 912
909 do { 913 do {
910 start = read_seqbegin(lock); 914 start = read_seqcount_begin(s);
911 bcnt = iter->counters.bcnt; 915 bcnt = iter->counters.bcnt;
912 pcnt = iter->counters.pcnt; 916 pcnt = iter->counters.pcnt;
913 } while (read_seqretry(lock, start)); 917 } while (read_seqcount_retry(s, start));
914 918
915 ADD_COUNTER(counters[i], bcnt, pcnt); 919 ADD_COUNTER(counters[i], bcnt, pcnt);
916 ++i; 920 ++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1325 int ret = 0; 1329 int ret = 0;
1326 const void *loc_cpu_entry; 1330 const void *loc_cpu_entry;
1327 struct ip6t_entry *iter; 1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1328#ifdef CONFIG_COMPAT 1333#ifdef CONFIG_COMPAT
1329 struct compat_xt_counters_info compat_tmp; 1334 struct compat_xt_counters_info compat_tmp;
1330 1335
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1381 i = 0; 1386 i = 0;
1382 /* Choose the copy that is on our node */ 1387 /* Choose the copy that is on our node */
1383 curcpu = smp_processor_id(); 1388 curcpu = smp_processor_id();
1384 xt_info_wrlock(curcpu); 1389 addend = xt_write_recseq_begin();
1385 loc_cpu_entry = private->entries[curcpu]; 1390 loc_cpu_entry = private->entries[curcpu];
1386 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1391 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1387 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1392 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1388 ++i; 1393 ++i;
1389 } 1394 }
1390 xt_info_wrunlock(curcpu); 1395 xt_write_recseq_end(addend);
1391 1396
1392 unlock_up_free: 1397 unlock_up_free:
1393 local_bh_enable(); 1398 local_bh_enable();
@@ -1578,7 +1583,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1578 struct xt_table_info *newinfo, unsigned char *base) 1583 struct xt_table_info *newinfo, unsigned char *base)
1579{ 1584{
1580 struct xt_entry_target *t; 1585 struct xt_entry_target *t;
1581 struct xt_target *target;
1582 struct ip6t_entry *de; 1586 struct ip6t_entry *de;
1583 unsigned int origsize; 1587 unsigned int origsize;
1584 int ret, h; 1588 int ret, h;
@@ -1600,7 +1604,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1600 } 1604 }
1601 de->target_offset = e->target_offset - (origsize - *size); 1605 de->target_offset = e->target_offset - (origsize - *size);
1602 t = compat_ip6t_get_target(e); 1606 t = compat_ip6t_get_target(e);
1603 target = t->u.kernel.target;
1604 xt_compat_target_from_user(t, dstptr, size); 1607 xt_compat_target_from_user(t, dstptr, size);
1605 1608
1606 de->next_offset = e->next_offset - (origsize - *size); 1609 de->next_offset = e->next_offset - (origsize - *size);
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 679a0a3b7b3c..00d19173db7e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -64,7 +64,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
66 skb->mark != mark || 66 skb->mark != mark ||
67 ipv6_hdr(skb)->hop_limit != hop_limit)) 67 ipv6_hdr(skb)->hop_limit != hop_limit ||
68 flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
68 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; 69 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
69 70
70 return ret; 71 return ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a1c3b46c56b..e5e5425fe7d0 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -67,8 +67,8 @@ static struct raw_hashinfo raw_v6_hashinfo = {
67}; 67};
68 68
69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, 69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
70 unsigned short num, struct in6_addr *loc_addr, 70 unsigned short num, const struct in6_addr *loc_addr,
71 struct in6_addr *rmt_addr, int dif) 71 const struct in6_addr *rmt_addr, int dif)
72{ 72{
73 struct hlist_node *node; 73 struct hlist_node *node;
74 int is_multicast = ipv6_addr_is_multicast(loc_addr); 74 int is_multicast = ipv6_addr_is_multicast(loc_addr);
@@ -154,8 +154,8 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
154 */ 154 */
155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) 155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
156{ 156{
157 struct in6_addr *saddr; 157 const struct in6_addr *saddr;
158 struct in6_addr *daddr; 158 const struct in6_addr *daddr;
159 struct sock *sk; 159 struct sock *sk;
160 int delivered = 0; 160 int delivered = 0;
161 __u8 hash; 161 __u8 hash;
@@ -348,7 +348,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
348{ 348{
349 struct sock *sk; 349 struct sock *sk;
350 int hash; 350 int hash;
351 struct in6_addr *saddr, *daddr; 351 const struct in6_addr *saddr, *daddr;
352 struct net *net; 352 struct net *net;
353 353
354 hash = nexthdr & (RAW_HTABLE_SIZE - 1); 354 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
@@ -357,7 +357,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
357 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 357 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
358 if (sk != NULL) { 358 if (sk != NULL) {
359 /* Note: ipv6_hdr(skb) != skb->data */ 359 /* Note: ipv6_hdr(skb) != skb->data */
360 struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; 360 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
361 saddr = &ip6h->saddr; 361 saddr = &ip6h->saddr;
362 daddr = &ip6h->daddr; 362 daddr = &ip6h->daddr;
363 net = dev_net(skb->dev); 363 net = dev_net(skb->dev);
@@ -1231,7 +1231,7 @@ struct proto rawv6_prot = {
1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) 1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1232{ 1232{
1233 struct ipv6_pinfo *np = inet6_sk(sp); 1233 struct ipv6_pinfo *np = inet6_sk(sp);
1234 struct in6_addr *dest, *src; 1234 const struct in6_addr *dest, *src;
1235 __u16 destp, srcp; 1235 __u16 destp, srcp;
1236 1236
1237 dest = &np->daddr; 1237 dest = &np->daddr;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 07beeb06f752..7b954e2539d0 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -224,7 +224,7 @@ out:
224} 224}
225 225
226static __inline__ struct frag_queue * 226static __inline__ struct frag_queue *
227fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst) 227fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
228{ 228{
229 struct inet_frag_queue *q; 229 struct inet_frag_queue *q;
230 struct ip6_create_arg arg; 230 struct ip6_create_arg arg;
@@ -535,7 +535,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
535{ 535{
536 struct frag_hdr *fhdr; 536 struct frag_hdr *fhdr;
537 struct frag_queue *fq; 537 struct frag_queue *fq;
538 struct ipv6hdr *hdr = ipv6_hdr(skb); 538 const struct ipv6hdr *hdr = ipv6_hdr(skb);
539 struct net *net = dev_net(skb_dst(skb)->dev); 539 struct net *net = dev_net(skb_dst(skb)->dev);
540 540
541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fd0eec6f88c6..f1be5c5c85ef 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -89,12 +89,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89 89
90#ifdef CONFIG_IPV6_ROUTE_INFO 90#ifdef CONFIG_IPV6_ROUTE_INFO
91static struct rt6_info *rt6_add_route_info(struct net *net, 91static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen, 92 const struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex, 93 const struct in6_addr *gwaddr, int ifindex,
94 unsigned pref); 94 unsigned pref);
95static struct rt6_info *rt6_get_route_info(struct net *net, 95static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen, 96 const struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex); 97 const struct in6_addr *gwaddr, int ifindex);
98#endif 98#endif
99 99
100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) 100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -227,9 +227,14 @@ static struct rt6_info ip6_blk_hole_entry_template = {
227#endif 227#endif
228 228
229/* allocate dst with ip6_dst_ops */ 229/* allocate dst with ip6_dst_ops */
230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops) 230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
231 struct net_device *dev)
231{ 232{
232 return (struct rt6_info *)dst_alloc(ops, 0); 233 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
234
235 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
236
237 return rt;
233} 238}
234 239
235static void ip6_dst_destroy(struct dst_entry *dst) 240static void ip6_dst_destroy(struct dst_entry *dst)
@@ -290,7 +295,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
290 time_after(jiffies, rt->rt6i_expires); 295 time_after(jiffies, rt->rt6i_expires);
291} 296}
292 297
293static inline int rt6_need_strict(struct in6_addr *daddr) 298static inline int rt6_need_strict(const struct in6_addr *daddr)
294{ 299{
295 return ipv6_addr_type(daddr) & 300 return ipv6_addr_type(daddr) &
296 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 301 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -302,7 +307,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
302 307
303static inline struct rt6_info *rt6_device_match(struct net *net, 308static inline struct rt6_info *rt6_device_match(struct net *net,
304 struct rt6_info *rt, 309 struct rt6_info *rt,
305 struct in6_addr *saddr, 310 const struct in6_addr *saddr,
306 int oif, 311 int oif,
307 int flags) 312 int flags)
308{ 313{
@@ -514,7 +519,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
514 519
515#ifdef CONFIG_IPV6_ROUTE_INFO 520#ifdef CONFIG_IPV6_ROUTE_INFO
516int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 521int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
517 struct in6_addr *gwaddr) 522 const struct in6_addr *gwaddr)
518{ 523{
519 struct net *net = dev_net(dev); 524 struct net *net = dev_net(dev);
520 struct route_info *rinfo = (struct route_info *) opt; 525 struct route_info *rinfo = (struct route_info *) opt;
@@ -677,8 +682,8 @@ int ip6_ins_rt(struct rt6_info *rt)
677 return __ip6_ins_rt(rt, &info); 682 return __ip6_ins_rt(rt, &info);
678} 683}
679 684
680static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr, 685static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
681 struct in6_addr *saddr) 686 const struct in6_addr *saddr)
682{ 687{
683 struct rt6_info *rt; 688 struct rt6_info *rt;
684 689
@@ -746,7 +751,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
746 return rt; 751 return rt;
747} 752}
748 753
749static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr) 754static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
750{ 755{
751 struct rt6_info *rt = ip6_rt_copy(ort); 756 struct rt6_info *rt = ip6_rt_copy(ort);
752 if (rt) { 757 if (rt) {
@@ -837,7 +842,7 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
837 842
838void ip6_route_input(struct sk_buff *skb) 843void ip6_route_input(struct sk_buff *skb)
839{ 844{
840 struct ipv6hdr *iph = ipv6_hdr(skb); 845 const struct ipv6hdr *iph = ipv6_hdr(skb);
841 struct net *net = dev_net(skb->dev); 846 struct net *net = dev_net(skb->dev);
842 int flags = RT6_LOOKUP_F_HAS_SADDR; 847 int flags = RT6_LOOKUP_F_HAS_SADDR;
843 struct flowi6 fl6 = { 848 struct flowi6 fl6 = {
@@ -881,11 +886,13 @@ EXPORT_SYMBOL(ip6_route_output);
881 886
882struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) 887struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
883{ 888{
884 struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1); 889 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
885 struct rt6_info *ort = (struct rt6_info *) dst_orig;
886 struct dst_entry *new = NULL; 890 struct dst_entry *new = NULL;
887 891
892 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
888 if (rt) { 893 if (rt) {
894 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
895
889 new = &rt->dst; 896 new = &rt->dst;
890 897
891 new->__use = 1; 898 new->__use = 1;
@@ -893,9 +900,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
893 new->output = dst_discard; 900 new->output = dst_discard;
894 901
895 dst_copy_metrics(new, &ort->dst); 902 dst_copy_metrics(new, &ort->dst);
896 new->dev = ort->dst.dev;
897 if (new->dev)
898 dev_hold(new->dev);
899 rt->rt6i_idev = ort->rt6i_idev; 903 rt->rt6i_idev = ort->rt6i_idev;
900 if (rt->rt6i_idev) 904 if (rt->rt6i_idev)
901 in6_dev_hold(rt->rt6i_idev); 905 in6_dev_hold(rt->rt6i_idev);
@@ -1038,13 +1042,12 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1038 if (unlikely(idev == NULL)) 1042 if (unlikely(idev == NULL))
1039 return NULL; 1043 return NULL;
1040 1044
1041 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1045 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
1042 if (unlikely(rt == NULL)) { 1046 if (unlikely(rt == NULL)) {
1043 in6_dev_put(idev); 1047 in6_dev_put(idev);
1044 goto out; 1048 goto out;
1045 } 1049 }
1046 1050
1047 dev_hold(dev);
1048 if (neigh) 1051 if (neigh)
1049 neigh_hold(neigh); 1052 neigh_hold(neigh);
1050 else { 1053 else {
@@ -1053,7 +1056,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1053 neigh = NULL; 1056 neigh = NULL;
1054 } 1057 }
1055 1058
1056 rt->rt6i_dev = dev;
1057 rt->rt6i_idev = idev; 1059 rt->rt6i_idev = idev;
1058 rt->rt6i_nexthop = neigh; 1060 rt->rt6i_nexthop = neigh;
1059 atomic_set(&rt->dst.__refcnt, 1); 1061 atomic_set(&rt->dst.__refcnt, 1);
@@ -1212,7 +1214,7 @@ int ip6_route_add(struct fib6_config *cfg)
1212 goto out; 1214 goto out;
1213 } 1215 }
1214 1216
1215 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1217 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
1216 1218
1217 if (rt == NULL) { 1219 if (rt == NULL) {
1218 err = -ENOMEM; 1220 err = -ENOMEM;
@@ -1279,7 +1281,7 @@ int ip6_route_add(struct fib6_config *cfg)
1279 } 1281 }
1280 1282
1281 if (cfg->fc_flags & RTF_GATEWAY) { 1283 if (cfg->fc_flags & RTF_GATEWAY) {
1282 struct in6_addr *gw_addr; 1284 const struct in6_addr *gw_addr;
1283 int gwa_type; 1285 int gwa_type;
1284 1286
1285 gw_addr = &cfg->fc_gateway; 1287 gw_addr = &cfg->fc_gateway;
@@ -1332,6 +1334,16 @@ int ip6_route_add(struct fib6_config *cfg)
1332 if (dev == NULL) 1334 if (dev == NULL)
1333 goto out; 1335 goto out;
1334 1336
1337 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1338 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1339 err = -EINVAL;
1340 goto out;
1341 }
1342 ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
1343 rt->rt6i_prefsrc.plen = 128;
1344 } else
1345 rt->rt6i_prefsrc.plen = 0;
1346
1335 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { 1347 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1336 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev); 1348 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1337 if (IS_ERR(rt->rt6i_nexthop)) { 1349 if (IS_ERR(rt->rt6i_nexthop)) {
@@ -1509,9 +1521,9 @@ out:
1509 return rt; 1521 return rt;
1510}; 1522};
1511 1523
1512static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, 1524static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1513 struct in6_addr *src, 1525 const struct in6_addr *src,
1514 struct in6_addr *gateway, 1526 const struct in6_addr *gateway,
1515 struct net_device *dev) 1527 struct net_device *dev)
1516{ 1528{
1517 int flags = RT6_LOOKUP_F_HAS_SADDR; 1529 int flags = RT6_LOOKUP_F_HAS_SADDR;
@@ -1533,8 +1545,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1533 flags, __ip6_route_redirect); 1545 flags, __ip6_route_redirect);
1534} 1546}
1535 1547
1536void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, 1548void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1537 struct in6_addr *saddr, 1549 const struct in6_addr *saddr,
1538 struct neighbour *neigh, u8 *lladdr, int on_link) 1550 struct neighbour *neigh, u8 *lladdr, int on_link)
1539{ 1551{
1540 struct rt6_info *rt, *nrt = NULL; 1552 struct rt6_info *rt, *nrt = NULL;
@@ -1608,7 +1620,7 @@ out:
1608 * i.e. Path MTU discovery 1620 * i.e. Path MTU discovery
1609 */ 1621 */
1610 1622
1611static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr, 1623static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1612 struct net *net, u32 pmtu, int ifindex) 1624 struct net *net, u32 pmtu, int ifindex)
1613{ 1625{
1614 struct rt6_info *rt, *nrt; 1626 struct rt6_info *rt, *nrt;
@@ -1693,7 +1705,7 @@ out:
1693 dst_release(&rt->dst); 1705 dst_release(&rt->dst);
1694} 1706}
1695 1707
1696void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, 1708void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1697 struct net_device *dev, u32 pmtu) 1709 struct net_device *dev, u32 pmtu)
1698{ 1710{
1699 struct net *net = dev_net(dev); 1711 struct net *net = dev_net(dev);
@@ -1721,7 +1733,8 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1721static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1733static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1722{ 1734{
1723 struct net *net = dev_net(ort->rt6i_dev); 1735 struct net *net = dev_net(ort->rt6i_dev);
1724 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1736 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
1737 ort->dst.dev);
1725 1738
1726 if (rt) { 1739 if (rt) {
1727 rt->dst.input = ort->dst.input; 1740 rt->dst.input = ort->dst.input;
@@ -1729,9 +1742,6 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1729 1742
1730 dst_copy_metrics(&rt->dst, &ort->dst); 1743 dst_copy_metrics(&rt->dst, &ort->dst);
1731 rt->dst.error = ort->dst.error; 1744 rt->dst.error = ort->dst.error;
1732 rt->dst.dev = ort->dst.dev;
1733 if (rt->dst.dev)
1734 dev_hold(rt->dst.dev);
1735 rt->rt6i_idev = ort->rt6i_idev; 1745 rt->rt6i_idev = ort->rt6i_idev;
1736 if (rt->rt6i_idev) 1746 if (rt->rt6i_idev)
1737 in6_dev_hold(rt->rt6i_idev); 1747 in6_dev_hold(rt->rt6i_idev);
@@ -1753,8 +1763,8 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1753 1763
1754#ifdef CONFIG_IPV6_ROUTE_INFO 1764#ifdef CONFIG_IPV6_ROUTE_INFO
1755static struct rt6_info *rt6_get_route_info(struct net *net, 1765static struct rt6_info *rt6_get_route_info(struct net *net,
1756 struct in6_addr *prefix, int prefixlen, 1766 const struct in6_addr *prefix, int prefixlen,
1757 struct in6_addr *gwaddr, int ifindex) 1767 const struct in6_addr *gwaddr, int ifindex)
1758{ 1768{
1759 struct fib6_node *fn; 1769 struct fib6_node *fn;
1760 struct rt6_info *rt = NULL; 1770 struct rt6_info *rt = NULL;
@@ -1785,8 +1795,8 @@ out:
1785} 1795}
1786 1796
1787static struct rt6_info *rt6_add_route_info(struct net *net, 1797static struct rt6_info *rt6_add_route_info(struct net *net,
1788 struct in6_addr *prefix, int prefixlen, 1798 const struct in6_addr *prefix, int prefixlen,
1789 struct in6_addr *gwaddr, int ifindex, 1799 const struct in6_addr *gwaddr, int ifindex,
1790 unsigned pref) 1800 unsigned pref)
1791{ 1801{
1792 struct fib6_config cfg = { 1802 struct fib6_config cfg = {
@@ -1814,7 +1824,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
1814} 1824}
1815#endif 1825#endif
1816 1826
1817struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev) 1827struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1818{ 1828{
1819 struct rt6_info *rt; 1829 struct rt6_info *rt;
1820 struct fib6_table *table; 1830 struct fib6_table *table;
@@ -1836,7 +1846,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1836 return rt; 1846 return rt;
1837} 1847}
1838 1848
1839struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, 1849struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1840 struct net_device *dev, 1850 struct net_device *dev,
1841 unsigned int pref) 1851 unsigned int pref)
1842{ 1852{
@@ -2001,7 +2011,8 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2001 int anycast) 2011 int anycast)
2002{ 2012{
2003 struct net *net = dev_net(idev->dev); 2013 struct net *net = dev_net(idev->dev);
2004 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 2014 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
2015 net->loopback_dev);
2005 struct neighbour *neigh; 2016 struct neighbour *neigh;
2006 2017
2007 if (rt == NULL) { 2018 if (rt == NULL) {
@@ -2011,13 +2022,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2011 return ERR_PTR(-ENOMEM); 2022 return ERR_PTR(-ENOMEM);
2012 } 2023 }
2013 2024
2014 dev_hold(net->loopback_dev);
2015 in6_dev_hold(idev); 2025 in6_dev_hold(idev);
2016 2026
2017 rt->dst.flags = DST_HOST; 2027 rt->dst.flags = DST_HOST;
2018 rt->dst.input = ip6_input; 2028 rt->dst.input = ip6_input;
2019 rt->dst.output = ip6_output; 2029 rt->dst.output = ip6_output;
2020 rt->rt6i_dev = net->loopback_dev;
2021 rt->rt6i_idev = idev; 2030 rt->rt6i_idev = idev;
2022 rt->dst.obsolete = -1; 2031 rt->dst.obsolete = -1;
2023 2032
@@ -2043,6 +2052,55 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2043 return rt; 2052 return rt;
2044} 2053}
2045 2054
2055int ip6_route_get_saddr(struct net *net,
2056 struct rt6_info *rt,
2057 const struct in6_addr *daddr,
2058 unsigned int prefs,
2059 struct in6_addr *saddr)
2060{
2061 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2062 int err = 0;
2063 if (rt->rt6i_prefsrc.plen)
2064 ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
2065 else
2066 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2067 daddr, prefs, saddr);
2068 return err;
2069}
2070
2071/* remove deleted ip from prefsrc entries */
2072struct arg_dev_net_ip {
2073 struct net_device *dev;
2074 struct net *net;
2075 struct in6_addr *addr;
2076};
2077
2078static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2079{
2080 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2081 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2082 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2083
2084 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
2085 rt != net->ipv6.ip6_null_entry &&
2086 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2087 /* remove prefsrc entry */
2088 rt->rt6i_prefsrc.plen = 0;
2089 }
2090 return 0;
2091}
2092
2093void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2094{
2095 struct net *net = dev_net(ifp->idev->dev);
2096 struct arg_dev_net_ip adni = {
2097 .dev = ifp->idev->dev,
2098 .net = net,
2099 .addr = &ifp->addr,
2100 };
2101 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2102}
2103
2046struct arg_dev_net { 2104struct arg_dev_net {
2047 struct net_device *dev; 2105 struct net_device *dev;
2048 struct net *net; 2106 struct net *net;
@@ -2189,6 +2247,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2189 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 2247 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2190 } 2248 }
2191 2249
2250 if (tb[RTA_PREFSRC])
2251 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2252
2192 if (tb[RTA_OIF]) 2253 if (tb[RTA_OIF])
2193 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 2254 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2194 2255
@@ -2331,13 +2392,17 @@ static int rt6_fill_node(struct net *net,
2331#endif 2392#endif
2332 NLA_PUT_U32(skb, RTA_IIF, iif); 2393 NLA_PUT_U32(skb, RTA_IIF, iif);
2333 } else if (dst) { 2394 } else if (dst) {
2334 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2335 struct in6_addr saddr_buf; 2395 struct in6_addr saddr_buf;
2336 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2396 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2337 dst, 0, &saddr_buf) == 0)
2338 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2397 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2339 } 2398 }
2340 2399
2400 if (rt->rt6i_prefsrc.plen) {
2401 struct in6_addr saddr_buf;
2402 ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
2403 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2404 }
2405
2341 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2406 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2342 goto nla_put_failure; 2407 goto nla_put_failure;
2343 2408
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 43b33373adb2..a6a32b39b607 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -250,11 +250,6 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
250 250
251 dev_net_set(dev, net); 251 dev_net_set(dev, net);
252 252
253 if (strchr(name, '%')) {
254 if (dev_alloc_name(dev, name) < 0)
255 goto failed_free;
256 }
257
258 nt = netdev_priv(dev); 253 nt = netdev_priv(dev);
259 254
260 nt->parms = *parms; 255 nt->parms = *parms;
@@ -452,7 +447,7 @@ out:
452} 447}
453 448
454static int 449static int
455isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) 450isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
456{ 451{
457 struct ip_tunnel_prl_entry *p; 452 struct ip_tunnel_prl_entry *p;
458 int ok = 1; 453 int ok = 1;
@@ -465,7 +460,8 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
465 else 460 else
466 skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT; 461 skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
467 } else { 462 } else {
468 struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr; 463 const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
464
469 if (ipv6_addr_is_isatap(addr6) && 465 if (ipv6_addr_is_isatap(addr6) &&
470 (addr6->s6_addr32[3] == iph->saddr) && 466 (addr6->s6_addr32[3] == iph->saddr) &&
471 ipv6_chk_prefix(addr6, t->dev)) 467 ipv6_chk_prefix(addr6, t->dev))
@@ -499,7 +495,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
499 8 bytes of packet payload. It means, that precise relaying of 495 8 bytes of packet payload. It means, that precise relaying of
500 ICMP in the real Internet is absolutely infeasible. 496 ICMP in the real Internet is absolutely infeasible.
501 */ 497 */
502 struct iphdr *iph = (struct iphdr*)skb->data; 498 const struct iphdr *iph = (const struct iphdr *)skb->data;
503 const int type = icmp_hdr(skb)->type; 499 const int type = icmp_hdr(skb)->type;
504 const int code = icmp_hdr(skb)->code; 500 const int code = icmp_hdr(skb)->code;
505 struct ip_tunnel *t; 501 struct ip_tunnel *t;
@@ -557,7 +553,7 @@ out:
557 return err; 553 return err;
558} 554}
559 555
560static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 556static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
561{ 557{
562 if (INET_ECN_is_ce(iph->tos)) 558 if (INET_ECN_is_ce(iph->tos))
563 IP6_ECN_set_ce(ipv6_hdr(skb)); 559 IP6_ECN_set_ce(ipv6_hdr(skb));
@@ -565,7 +561,7 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
565 561
566static int ipip6_rcv(struct sk_buff *skb) 562static int ipip6_rcv(struct sk_buff *skb)
567{ 563{
568 struct iphdr *iph; 564 const struct iphdr *iph;
569 struct ip_tunnel *tunnel; 565 struct ip_tunnel *tunnel;
570 566
571 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 567 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
@@ -621,7 +617,7 @@ out:
621 * comes from 6rd / 6to4 (RFC 3056) addr space. 617 * comes from 6rd / 6to4 (RFC 3056) addr space.
622 */ 618 */
623static inline 619static inline
624__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel) 620__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
625{ 621{
626 __be32 dst = 0; 622 __be32 dst = 0;
627 623
@@ -664,8 +660,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
664{ 660{
665 struct ip_tunnel *tunnel = netdev_priv(dev); 661 struct ip_tunnel *tunnel = netdev_priv(dev);
666 struct pcpu_tstats *tstats; 662 struct pcpu_tstats *tstats;
667 struct iphdr *tiph = &tunnel->parms.iph; 663 const struct iphdr *tiph = &tunnel->parms.iph;
668 struct ipv6hdr *iph6 = ipv6_hdr(skb); 664 const struct ipv6hdr *iph6 = ipv6_hdr(skb);
669 u8 tos = tunnel->parms.iph.tos; 665 u8 tos = tunnel->parms.iph.tos;
670 __be16 df = tiph->frag_off; 666 __be16 df = tiph->frag_off;
671 struct rtable *rt; /* Route to the other host */ 667 struct rtable *rt; /* Route to the other host */
@@ -673,8 +669,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
673 struct iphdr *iph; /* Our new IP header */ 669 struct iphdr *iph; /* Our new IP header */
674 unsigned int max_headroom; /* The extra header space needed */ 670 unsigned int max_headroom; /* The extra header space needed */
675 __be32 dst = tiph->daddr; 671 __be32 dst = tiph->daddr;
672 struct flowi4 fl4;
676 int mtu; 673 int mtu;
677 struct in6_addr *addr6; 674 const struct in6_addr *addr6;
678 int addr_type; 675 int addr_type;
679 676
680 if (skb->protocol != htons(ETH_P_IPV6)) 677 if (skb->protocol != htons(ETH_P_IPV6))
@@ -693,7 +690,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
693 goto tx_error; 690 goto tx_error;
694 } 691 }
695 692
696 addr6 = (struct in6_addr*)&neigh->primary_key; 693 addr6 = (const struct in6_addr*)&neigh->primary_key;
697 addr_type = ipv6_addr_type(addr6); 694 addr_type = ipv6_addr_type(addr6);
698 695
699 if ((addr_type & IPV6_ADDR_UNICAST) && 696 if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -718,7 +715,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
718 goto tx_error; 715 goto tx_error;
719 } 716 }
720 717
721 addr6 = (struct in6_addr*)&neigh->primary_key; 718 addr6 = (const struct in6_addr*)&neigh->primary_key;
722 addr_type = ipv6_addr_type(addr6); 719 addr_type = ipv6_addr_type(addr6);
723 720
724 if (addr_type == IPV6_ADDR_ANY) { 721 if (addr_type == IPV6_ADDR_ANY) {
@@ -732,7 +729,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
732 dst = addr6->s6_addr32[3]; 729 dst = addr6->s6_addr32[3];
733 } 730 }
734 731
735 rt = ip_route_output_ports(dev_net(dev), NULL, 732 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
736 dst, tiph->saddr, 733 dst, tiph->saddr,
737 0, 0, 734 0, 0,
738 IPPROTO_IPV6, RT_TOS(tos), 735 IPPROTO_IPV6, RT_TOS(tos),
@@ -826,8 +823,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
826 iph->frag_off = df; 823 iph->frag_off = df;
827 iph->protocol = IPPROTO_IPV6; 824 iph->protocol = IPPROTO_IPV6;
828 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 825 iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
829 iph->daddr = rt->rt_dst; 826 iph->daddr = fl4.daddr;
830 iph->saddr = rt->rt_src; 827 iph->saddr = fl4.saddr;
831 828
832 if ((iph->ttl = tiph->ttl) == 0) 829 if ((iph->ttl = tiph->ttl) == 0)
833 iph->ttl = iph6->hop_limit; 830 iph->ttl = iph6->hop_limit;
@@ -849,13 +846,14 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
849{ 846{
850 struct net_device *tdev = NULL; 847 struct net_device *tdev = NULL;
851 struct ip_tunnel *tunnel; 848 struct ip_tunnel *tunnel;
852 struct iphdr *iph; 849 const struct iphdr *iph;
850 struct flowi4 fl4;
853 851
854 tunnel = netdev_priv(dev); 852 tunnel = netdev_priv(dev);
855 iph = &tunnel->parms.iph; 853 iph = &tunnel->parms.iph;
856 854
857 if (iph->daddr) { 855 if (iph->daddr) {
858 struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL, 856 struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
859 iph->daddr, iph->saddr, 857 iph->daddr, iph->saddr,
860 0, 0, 858 0, 0,
861 IPPROTO_IPV6, 859 IPPROTO_IPV6,
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 352c26081f5d..8b9644a8b697 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -66,7 +66,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
66static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], 66static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
67 ipv6_cookie_scratch); 67 ipv6_cookie_scratch);
68 68
69static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, 69static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
70 __be16 sport, __be16 dport, u32 count, int c) 70 __be16 sport, __be16 dport, u32 count, int c)
71{ 71{
72 __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch); 72 __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
@@ -86,7 +86,8 @@ static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
86 return tmp[17]; 86 return tmp[17];
87} 87}
88 88
89static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr, 89static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
90 const struct in6_addr *daddr,
90 __be16 sport, __be16 dport, __u32 sseq, 91 __be16 sport, __be16 dport, __u32 sseq,
91 __u32 count, __u32 data) 92 __u32 count, __u32 data)
92{ 93{
@@ -96,8 +97,8 @@ static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *dadd
96 & COOKIEMASK)); 97 & COOKIEMASK));
97} 98}
98 99
99static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr, 100static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
100 struct in6_addr *daddr, __be16 sport, 101 const struct in6_addr *daddr, __be16 sport,
101 __be16 dport, __u32 sseq, __u32 count, 102 __be16 dport, __u32 sseq, __u32 count,
102 __u32 maxdiff) 103 __u32 maxdiff)
103{ 104{
@@ -116,7 +117,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
116 117
117__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) 118__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
118{ 119{
119 struct ipv6hdr *iph = ipv6_hdr(skb); 120 const struct ipv6hdr *iph = ipv6_hdr(skb);
120 const struct tcphdr *th = tcp_hdr(skb); 121 const struct tcphdr *th = tcp_hdr(skb);
121 int mssind; 122 int mssind;
122 const __u16 mss = *mssp; 123 const __u16 mss = *mssp;
@@ -138,7 +139,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
138 139
139static inline int cookie_check(struct sk_buff *skb, __u32 cookie) 140static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
140{ 141{
141 struct ipv6hdr *iph = ipv6_hdr(skb); 142 const struct ipv6hdr *iph = ipv6_hdr(skb);
142 const struct tcphdr *th = tcp_hdr(skb); 143 const struct tcphdr *th = tcp_hdr(skb);
143 __u32 seq = ntohl(th->seq) - 1; 144 __u32 seq = ntohl(th->seq) - 1;
144 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr, 145 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f49e5dd41bb..868366470b4a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -76,8 +76,8 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 76
77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78static void __tcp_v6_send_check(struct sk_buff *skb, 78static void __tcp_v6_send_check(struct sk_buff *skb,
79 struct in6_addr *saddr, 79 const struct in6_addr *saddr,
80 struct in6_addr *daddr); 80 const struct in6_addr *daddr);
81 81
82static const struct inet_connection_sock_af_ops ipv6_mapped; 82static const struct inet_connection_sock_af_ops ipv6_mapped;
83static const struct inet_connection_sock_af_ops ipv6_specific; 83static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -86,7 +86,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 86static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87#else 87#else
88static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 88static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
89 struct in6_addr *addr) 89 const struct in6_addr *addr)
90{ 90{
91 return NULL; 91 return NULL;
92} 92}
@@ -106,8 +106,8 @@ static void tcp_v6_hash(struct sock *sk)
106} 106}
107 107
108static __inline__ __sum16 tcp_v6_check(int len, 108static __inline__ __sum16 tcp_v6_check(int len,
109 struct in6_addr *saddr, 109 const struct in6_addr *saddr,
110 struct in6_addr *daddr, 110 const struct in6_addr *daddr,
111 __wsum base) 111 __wsum base)
112{ 112{
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); 113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
@@ -331,7 +331,7 @@ failure:
331static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 331static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
332 u8 type, u8 code, int offset, __be32 info) 332 u8 type, u8 code, int offset, __be32 info)
333{ 333{
334 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; 334 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
335 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); 335 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
336 struct ipv6_pinfo *np; 336 struct ipv6_pinfo *np;
337 struct sock *sk; 337 struct sock *sk;
@@ -551,7 +551,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
551 551
552#ifdef CONFIG_TCP_MD5SIG 552#ifdef CONFIG_TCP_MD5SIG
553static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 553static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
554 struct in6_addr *addr) 554 const struct in6_addr *addr)
555{ 555{
556 struct tcp_sock *tp = tcp_sk(sk); 556 struct tcp_sock *tp = tcp_sk(sk);
557 int i; 557 int i;
@@ -580,7 +580,7 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
580 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 580 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
581} 581}
582 582
583static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, 583static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
584 char *newkey, u8 newkeylen) 584 char *newkey, u8 newkeylen)
585{ 585{
586 /* Add key to the list */ 586 /* Add key to the list */
@@ -645,7 +645,7 @@ static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
645 newkey, newkeylen); 645 newkey, newkeylen);
646} 646}
647 647
648static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) 648static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
649{ 649{
650 struct tcp_sock *tp = tcp_sk(sk); 650 struct tcp_sock *tp = tcp_sk(sk);
651 int i; 651 int i;
@@ -753,8 +753,8 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
753} 753}
754 754
755static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 755static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
756 struct in6_addr *daddr, 756 const struct in6_addr *daddr,
757 struct in6_addr *saddr, int nbytes) 757 const struct in6_addr *saddr, int nbytes)
758{ 758{
759 struct tcp6_pseudohdr *bp; 759 struct tcp6_pseudohdr *bp;
760 struct scatterlist sg; 760 struct scatterlist sg;
@@ -771,7 +771,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
771} 771}
772 772
773static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 773static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
774 struct in6_addr *daddr, struct in6_addr *saddr, 774 const struct in6_addr *daddr, struct in6_addr *saddr,
775 struct tcphdr *th) 775 struct tcphdr *th)
776{ 776{
777 struct tcp_md5sig_pool *hp; 777 struct tcp_md5sig_pool *hp;
@@ -807,7 +807,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
807 struct sock *sk, struct request_sock *req, 807 struct sock *sk, struct request_sock *req,
808 struct sk_buff *skb) 808 struct sk_buff *skb)
809{ 809{
810 struct in6_addr *saddr, *daddr; 810 const struct in6_addr *saddr, *daddr;
811 struct tcp_md5sig_pool *hp; 811 struct tcp_md5sig_pool *hp;
812 struct hash_desc *desc; 812 struct hash_desc *desc;
813 struct tcphdr *th = tcp_hdr(skb); 813 struct tcphdr *th = tcp_hdr(skb);
@@ -819,7 +819,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
819 saddr = &inet6_rsk(req)->loc_addr; 819 saddr = &inet6_rsk(req)->loc_addr;
820 daddr = &inet6_rsk(req)->rmt_addr; 820 daddr = &inet6_rsk(req)->rmt_addr;
821 } else { 821 } else {
822 struct ipv6hdr *ip6h = ipv6_hdr(skb); 822 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
823 saddr = &ip6h->saddr; 823 saddr = &ip6h->saddr;
824 daddr = &ip6h->daddr; 824 daddr = &ip6h->daddr;
825 } 825 }
@@ -857,7 +857,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
857{ 857{
858 __u8 *hash_location = NULL; 858 __u8 *hash_location = NULL;
859 struct tcp_md5sig_key *hash_expected; 859 struct tcp_md5sig_key *hash_expected;
860 struct ipv6hdr *ip6h = ipv6_hdr(skb); 860 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
861 struct tcphdr *th = tcp_hdr(skb); 861 struct tcphdr *th = tcp_hdr(skb);
862 int genhash; 862 int genhash;
863 u8 newhash[16]; 863 u8 newhash[16];
@@ -915,7 +915,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
915#endif 915#endif
916 916
917static void __tcp_v6_send_check(struct sk_buff *skb, 917static void __tcp_v6_send_check(struct sk_buff *skb,
918 struct in6_addr *saddr, struct in6_addr *daddr) 918 const struct in6_addr *saddr, const struct in6_addr *daddr)
919{ 919{
920 struct tcphdr *th = tcp_hdr(skb); 920 struct tcphdr *th = tcp_hdr(skb);
921 921
@@ -939,7 +939,7 @@ static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
939 939
940static int tcp_v6_gso_send_check(struct sk_buff *skb) 940static int tcp_v6_gso_send_check(struct sk_buff *skb)
941{ 941{
942 struct ipv6hdr *ipv6h; 942 const struct ipv6hdr *ipv6h;
943 struct tcphdr *th; 943 struct tcphdr *th;
944 944
945 if (!pskb_may_pull(skb, sizeof(*th))) 945 if (!pskb_may_pull(skb, sizeof(*th)))
@@ -957,7 +957,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
957static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, 957static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
958 struct sk_buff *skb) 958 struct sk_buff *skb)
959{ 959{
960 struct ipv6hdr *iph = skb_gro_network_header(skb); 960 const struct ipv6hdr *iph = skb_gro_network_header(skb);
961 961
962 switch (skb->ip_summed) { 962 switch (skb->ip_summed) {
963 case CHECKSUM_COMPLETE: 963 case CHECKSUM_COMPLETE:
@@ -978,7 +978,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
978 978
979static int tcp6_gro_complete(struct sk_buff *skb) 979static int tcp6_gro_complete(struct sk_buff *skb)
980{ 980{
981 struct ipv6hdr *iph = ipv6_hdr(skb); 981 const struct ipv6hdr *iph = ipv6_hdr(skb);
982 struct tcphdr *th = tcp_hdr(skb); 982 struct tcphdr *th = tcp_hdr(skb);
983 983
984 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), 984 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
@@ -1469,7 +1469,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1469 1469
1470 First: no IPv4 options. 1470 First: no IPv4 options.
1471 */ 1471 */
1472 newinet->opt = NULL; 1472 newinet->inet_opt = NULL;
1473 newnp->ipv6_fl_list = NULL; 1473 newnp->ipv6_fl_list = NULL;
1474 1474
1475 /* Clone RX bits */ 1475 /* Clone RX bits */
@@ -1702,7 +1702,7 @@ ipv6_pktoptions:
1702static int tcp_v6_rcv(struct sk_buff *skb) 1702static int tcp_v6_rcv(struct sk_buff *skb)
1703{ 1703{
1704 struct tcphdr *th; 1704 struct tcphdr *th;
1705 struct ipv6hdr *hdr; 1705 const struct ipv6hdr *hdr;
1706 struct sock *sk; 1706 struct sock *sk;
1707 int ret; 1707 int ret;
1708 struct net *net = dev_net(skb->dev); 1708 struct net *net = dev_net(skb->dev);
@@ -2028,8 +2028,8 @@ static void get_openreq6(struct seq_file *seq,
2028 struct sock *sk, struct request_sock *req, int i, int uid) 2028 struct sock *sk, struct request_sock *req, int i, int uid)
2029{ 2029{
2030 int ttd = req->expires - jiffies; 2030 int ttd = req->expires - jiffies;
2031 struct in6_addr *src = &inet6_rsk(req)->loc_addr; 2031 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2032 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; 2032 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2033 2033
2034 if (ttd < 0) 2034 if (ttd < 0)
2035 ttd = 0; 2035 ttd = 0;
@@ -2057,7 +2057,7 @@ static void get_openreq6(struct seq_file *seq,
2057 2057
2058static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) 2058static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2059{ 2059{
2060 struct in6_addr *dest, *src; 2060 const struct in6_addr *dest, *src;
2061 __u16 destp, srcp; 2061 __u16 destp, srcp;
2062 int timer_active; 2062 int timer_active;
2063 unsigned long timer_expires; 2063 unsigned long timer_expires;
@@ -2114,7 +2114,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2114static void get_timewait6_sock(struct seq_file *seq, 2114static void get_timewait6_sock(struct seq_file *seq,
2115 struct inet_timewait_sock *tw, int i) 2115 struct inet_timewait_sock *tw, int i)
2116{ 2116{
2117 struct in6_addr *dest, *src; 2117 const struct in6_addr *dest, *src;
2118 __u16 destp, srcp; 2118 __u16 destp, srcp;
2119 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2119 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2120 int ttd = tw->tw_ttd - jiffies; 2120 int ttd = tw->tw_ttd - jiffies;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9e305d74b3d4..fc0c42a88e54 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -311,7 +311,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
311 struct udp_table *udptable) 311 struct udp_table *udptable)
312{ 312{
313 struct sock *sk; 313 struct sock *sk;
314 struct ipv6hdr *iph = ipv6_hdr(skb); 314 const struct ipv6hdr *iph = ipv6_hdr(skb);
315 315
316 if (unlikely(sk = skb_steal_sock(skb))) 316 if (unlikely(sk = skb_steal_sock(skb)))
317 return sk; 317 return sk;
@@ -463,9 +463,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
463 struct udp_table *udptable) 463 struct udp_table *udptable)
464{ 464{
465 struct ipv6_pinfo *np; 465 struct ipv6_pinfo *np;
466 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; 466 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
467 struct in6_addr *saddr = &hdr->saddr; 467 const struct in6_addr *saddr = &hdr->saddr;
468 struct in6_addr *daddr = &hdr->daddr; 468 const struct in6_addr *daddr = &hdr->daddr;
469 struct udphdr *uh = (struct udphdr*)(skb->data+offset); 469 struct udphdr *uh = (struct udphdr*)(skb->data+offset);
470 struct sock *sk; 470 struct sock *sk;
471 int err; 471 int err;
@@ -553,8 +553,8 @@ drop_no_sk_drops_inc:
553} 553}
554 554
555static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, 555static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
556 __be16 loc_port, struct in6_addr *loc_addr, 556 __be16 loc_port, const struct in6_addr *loc_addr,
557 __be16 rmt_port, struct in6_addr *rmt_addr, 557 __be16 rmt_port, const struct in6_addr *rmt_addr,
558 int dif) 558 int dif)
559{ 559{
560 struct hlist_nulls_node *node; 560 struct hlist_nulls_node *node;
@@ -633,7 +633,7 @@ drop:
633 * so we don't need to lock the hashes. 633 * so we don't need to lock the hashes.
634 */ 634 */
635static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 635static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
636 struct in6_addr *saddr, struct in6_addr *daddr, 636 const struct in6_addr *saddr, const struct in6_addr *daddr,
637 struct udp_table *udptable) 637 struct udp_table *udptable)
638{ 638{
639 struct sock *sk, *stack[256 / sizeof(struct sock *)]; 639 struct sock *sk, *stack[256 / sizeof(struct sock *)];
@@ -716,7 +716,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
716 struct net *net = dev_net(skb->dev); 716 struct net *net = dev_net(skb->dev);
717 struct sock *sk; 717 struct sock *sk;
718 struct udphdr *uh; 718 struct udphdr *uh;
719 struct in6_addr *saddr, *daddr; 719 const struct in6_addr *saddr, *daddr;
720 u32 ulen = 0; 720 u32 ulen = 0;
721 721
722 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 722 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -1278,7 +1278,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1278 1278
1279static int udp6_ufo_send_check(struct sk_buff *skb) 1279static int udp6_ufo_send_check(struct sk_buff *skb)
1280{ 1280{
1281 struct ipv6hdr *ipv6h; 1281 const struct ipv6hdr *ipv6h;
1282 struct udphdr *uh; 1282 struct udphdr *uh;
1283 1283
1284 if (!pskb_may_pull(skb, sizeof(*uh))) 1284 if (!pskb_may_pull(skb, sizeof(*uh)))
@@ -1328,7 +1328,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
1328 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot 1328 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
1329 * do checksum of UDP packets sent as multiple IP fragments. 1329 * do checksum of UDP packets sent as multiple IP fragments.
1330 */ 1330 */
1331 offset = skb->csum_start - skb_headroom(skb); 1331 offset = skb_checksum_start_offset(skb);
1332 csum = skb_checksum(skb, offset, skb->len- offset, 0); 1332 csum = skb_checksum(skb, offset, skb->len- offset, 0);
1333 offset += skb->csum_offset; 1333 offset += skb->csum_offset;
1334 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 1334 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
@@ -1382,7 +1382,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
1382{ 1382{
1383 struct inet_sock *inet = inet_sk(sp); 1383 struct inet_sock *inet = inet_sk(sp);
1384 struct ipv6_pinfo *np = inet6_sk(sp); 1384 struct ipv6_pinfo *np = inet6_sk(sp);
1385 struct in6_addr *dest, *src; 1385 const struct in6_addr *dest, *src;
1386 __u16 destp, srcp; 1386 __u16 destp, srcp;
1387 1387
1388 dest = &np->daddr; 1388 dest = &np->daddr;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index bbd48b101bae..3437d7d4eed6 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -41,10 +41,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
41{ 41{
42 struct ipv6hdr *top_iph; 42 struct ipv6hdr *top_iph;
43 struct ip_beet_phdr *ph; 43 struct ip_beet_phdr *ph;
44 struct iphdr *iphv4;
45 int optlen, hdr_len; 44 int optlen, hdr_len;
46 45
47 iphv4 = ip_hdr(skb);
48 hdr_len = 0; 46 hdr_len = 0;
49 optlen = XFRM_MODE_SKB_CB(skb)->optlen; 47 optlen = XFRM_MODE_SKB_CB(skb)->optlen;
50 if (unlikely(optlen)) 48 if (unlikely(optlen))
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 645cb968d450..4d6edff0498f 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,7 +20,7 @@
20 20
21static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) 21static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
22{ 22{
23 struct ipv6hdr *outer_iph = ipv6_hdr(skb); 23 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb); 24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
25 25
26 if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) 26 if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
55 dsfield &= ~INET_ECN_MASK; 55 dsfield &= ~INET_ECN_MASK;
56 ipv6_change_dsfield(top_iph, 0, dsfield); 56 ipv6_change_dsfield(top_iph, 0, dsfield);
57 top_iph->hop_limit = ip6_dst_hoplimit(dst->child); 57 top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
58 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); 58 ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
59 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); 59 ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
60 return 0; 60 return 0;
61} 61}
62 62
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 05e34c8ec913..d879f7efbd10 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
124 struct flowi6 *fl6 = &fl->u.ip6; 124 struct flowi6 *fl6 = &fl->u.ip6;
125 int onlyproto = 0; 125 int onlyproto = 0;
126 u16 offset = skb_network_header_len(skb); 126 u16 offset = skb_network_header_len(skb);
127 struct ipv6hdr *hdr = ipv6_hdr(skb); 127 const struct ipv6hdr *hdr = ipv6_hdr(skb);
128 struct ipv6_opt_hdr *exthdr; 128 struct ipv6_opt_hdr *exthdr;
129 const unsigned char *nh = skb_network_header(skb); 129 const unsigned char *nh = skb_network_header(skb);
130 u8 nexthdr = nh[IP6CB(skb)->nhoff]; 130 u8 nexthdr = nh[IP6CB(skb)->nhoff];
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2969cad408de..a6770a04e3bd 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,7 +68,7 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
68 68
69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
70 70
71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
72{ 72{
73 unsigned h; 73 unsigned h;
74 74
@@ -85,7 +85,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
86} 86}
87 87
88static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) 88static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
89{ 89{
90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
91 struct xfrm6_tunnel_spi *x6spi; 91 struct xfrm6_tunnel_spi *x6spi;
@@ -101,7 +101,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_
101 return NULL; 101 return NULL;
102} 102}
103 103
104__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) 104__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
105{ 105{
106 struct xfrm6_tunnel_spi *x6spi; 106 struct xfrm6_tunnel_spi *x6spi;
107 u32 spi; 107 u32 spi;
@@ -237,10 +237,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
237static int xfrm6_tunnel_rcv(struct sk_buff *skb) 237static int xfrm6_tunnel_rcv(struct sk_buff *skb)
238{ 238{
239 struct net *net = dev_net(skb->dev); 239 struct net *net = dev_net(skb->dev);
240 struct ipv6hdr *iph = ipv6_hdr(skb); 240 const struct ipv6hdr *iph = ipv6_hdr(skb);
241 __be32 spi; 241 __be32 spi;
242 242
243 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr); 243 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
245} 245}
246 246
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index bb47021c9a55..ccd214f9d196 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -2227,8 +2227,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
2227static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, 2227static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2228 struct sk_buff *skb, struct irlap_info *info) 2228 struct sk_buff *skb, struct irlap_info *info)
2229{ 2229{
2230 int ret = 0;
2231
2232 IRDA_DEBUG(1, "%s()\n", __func__); 2230 IRDA_DEBUG(1, "%s()\n", __func__);
2233 2231
2234 IRDA_ASSERT(self != NULL, return -ENODEV;); 2232 IRDA_ASSERT(self != NULL, return -ENODEV;);
@@ -2289,7 +2287,6 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2289 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, 2287 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
2290 event, irlap_event[event]); 2288 event, irlap_event[event]);
2291 2289
2292 ret = -EINVAL;
2293 break; 2290 break;
2294 } 2291 }
2295 2292
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 318766e5dbdf..b9ac598e2116 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -65,15 +65,14 @@ static const struct irda_entry irda_dirs[] = {
65void __init irda_proc_register(void) 65void __init irda_proc_register(void)
66{ 66{
67 int i; 67 int i;
68 struct proc_dir_entry *d;
69 68
70 proc_irda = proc_mkdir("irda", init_net.proc_net); 69 proc_irda = proc_mkdir("irda", init_net.proc_net);
71 if (proc_irda == NULL) 70 if (proc_irda == NULL)
72 return; 71 return;
73 72
74 for (i = 0; i < ARRAY_SIZE(irda_dirs); i++) 73 for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
75 d = proc_create(irda_dirs[i].name, 0, proc_irda, 74 (void) proc_create(irda_dirs[i].name, 0, proc_irda,
76 irda_dirs[i].fops); 75 irda_dirs[i].fops);
77} 76}
78 77
79/* 78/*
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7db86ffcf070..d62401c25684 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
712 sin6->sin6_family = AF_INET6; 712 sin6->sin6_family = AF_INET6;
713 sin6->sin6_port = port; 713 sin6->sin6_port = port;
714 sin6->sin6_flowinfo = 0; 714 sin6->sin6_flowinfo = 0;
715 ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6); 715 ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
716 sin6->sin6_scope_id = 0; 716 sin6->sin6_scope_id = 0;
717 return 128; 717 return 128;
718 } 718 }
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index fce9bd3bd3fe..81899600abe2 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -296,12 +296,12 @@ out_in_use:
296 296
297static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 297static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
298{ 298{
299 int rc;
300 struct inet_sock *inet = inet_sk(sk);
301 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 299 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
300 struct inet_sock *inet = inet_sk(sk);
301 struct flowi4 fl4;
302 struct rtable *rt; 302 struct rtable *rt;
303 __be32 saddr; 303 __be32 saddr;
304 int oif; 304 int oif, rc;
305 305
306 rc = -EINVAL; 306 rc = -EINVAL;
307 if (addr_len < sizeof(*lsa)) 307 if (addr_len < sizeof(*lsa))
@@ -320,7 +320,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
320 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 320 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
321 goto out; 321 goto out;
322 322
323 rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr, 323 rt = ip_route_connect(&fl4, lsa->l2tp_addr.s_addr, saddr,
324 RT_CONN_FLAGS(sk), oif, 324 RT_CONN_FLAGS(sk), oif,
325 IPPROTO_L2TP, 325 IPPROTO_L2TP,
326 0, 0, sk, true); 326 0, 0, sk, true);
@@ -340,10 +340,10 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
340 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 340 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
341 341
342 if (!inet->inet_saddr) 342 if (!inet->inet_saddr)
343 inet->inet_saddr = rt->rt_src; 343 inet->inet_saddr = fl4.saddr;
344 if (!inet->inet_rcv_saddr) 344 if (!inet->inet_rcv_saddr)
345 inet->inet_rcv_saddr = rt->rt_src; 345 inet->inet_rcv_saddr = fl4.saddr;
346 inet->inet_daddr = rt->rt_dst; 346 inet->inet_daddr = fl4.daddr;
347 sk->sk_state = TCP_ESTABLISHED; 347 sk->sk_state = TCP_ESTABLISHED;
348 inet->inet_id = jiffies; 348 inet->inet_id = jiffies;
349 349
@@ -416,7 +416,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
416 int rc; 416 int rc;
417 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); 417 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
418 struct inet_sock *inet = inet_sk(sk); 418 struct inet_sock *inet = inet_sk(sk);
419 struct ip_options *opt = inet->opt;
420 struct rtable *rt = NULL; 419 struct rtable *rt = NULL;
421 int connected = 0; 420 int connected = 0;
422 __be32 daddr; 421 __be32 daddr;
@@ -471,15 +470,23 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
471 rt = (struct rtable *) __sk_dst_check(sk, 0); 470 rt = (struct rtable *) __sk_dst_check(sk, 0);
472 471
473 if (rt == NULL) { 472 if (rt == NULL) {
473 struct ip_options_rcu *inet_opt;
474 struct flowi4 fl4;
475
476 rcu_read_lock();
477 inet_opt = rcu_dereference(inet->inet_opt);
478
474 /* Use correct destination address if we have options. */ 479 /* Use correct destination address if we have options. */
475 if (opt && opt->srr) 480 if (inet_opt && inet_opt->opt.srr)
476 daddr = opt->faddr; 481 daddr = inet_opt->opt.faddr;
482
483 rcu_read_unlock();
477 484
478 /* If this fails, retransmit mechanism of transport layer will 485 /* If this fails, retransmit mechanism of transport layer will
479 * keep trying until route appears or the connection times 486 * keep trying until route appears or the connection times
480 * itself out. 487 * itself out.
481 */ 488 */
482 rt = ip_route_output_ports(sock_net(sk), sk, 489 rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
483 daddr, inet->inet_saddr, 490 daddr, inet->inet_saddr,
484 inet->inet_dport, inet->inet_sport, 491 inet->inet_dport, inet->inet_sport,
485 sk->sk_protocol, RT_CONN_FLAGS(sk), 492 sk->sk_protocol, RT_CONN_FLAGS(sk),
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4c1e540732d7..93a41a09458b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -795,11 +795,12 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops
795 goto out; 795 goto out;
796 796
797 l2tp_nl_cmd_ops[pw_type] = ops; 797 l2tp_nl_cmd_ops[pw_type] = ops;
798 ret = 0;
798 799
799out: 800out:
800 genl_unlock(); 801 genl_unlock();
801err: 802err:
802 return 0; 803 return ret;
803} 804}
804EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); 805EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
805 806
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 513f85cc2ae1..f5fdfcbf552a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -2,7 +2,6 @@ config MAC80211
2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)" 2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
3 depends on CFG80211 3 depends on CFG80211
4 select CRYPTO 4 select CRYPTO
5 select CRYPTO_ECB
6 select CRYPTO_ARC4 5 select CRYPTO_ARC4
7 select CRYPTO_AES 6 select CRYPTO_AES
8 select CRC32 7 select CRC32
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 4bd6ef0be380..b9b595c08112 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -54,13 +54,12 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
54 u8 *cdata, u8 *mic) 54 u8 *cdata, u8 *mic)
55{ 55{
56 int i, j, last_len, num_blocks; 56 int i, j, last_len, num_blocks;
57 u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad; 57 u8 *pos, *cpos, *b, *s_0, *e, *b_0;
58 58
59 b = scratch; 59 b = scratch;
60 s_0 = scratch + AES_BLOCK_LEN; 60 s_0 = scratch + AES_BLOCK_LEN;
61 e = scratch + 2 * AES_BLOCK_LEN; 61 e = scratch + 2 * AES_BLOCK_LEN;
62 b_0 = scratch + 3 * AES_BLOCK_LEN; 62 b_0 = scratch + 3 * AES_BLOCK_LEN;
63 aad = scratch + 4 * AES_BLOCK_LEN;
64 63
65 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 64 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
66 last_len = data_len % AES_BLOCK_LEN; 65 last_len = data_len % AES_BLOCK_LEN;
@@ -94,13 +93,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
94 u8 *cdata, size_t data_len, u8 *mic, u8 *data) 93 u8 *cdata, size_t data_len, u8 *mic, u8 *data)
95{ 94{
96 int i, j, last_len, num_blocks; 95 int i, j, last_len, num_blocks;
97 u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad; 96 u8 *pos, *cpos, *b, *s_0, *a, *b_0;
98 97
99 b = scratch; 98 b = scratch;
100 s_0 = scratch + AES_BLOCK_LEN; 99 s_0 = scratch + AES_BLOCK_LEN;
101 a = scratch + 2 * AES_BLOCK_LEN; 100 a = scratch + 2 * AES_BLOCK_LEN;
102 b_0 = scratch + 3 * AES_BLOCK_LEN; 101 b_0 = scratch + 3 * AES_BLOCK_LEN;
103 aad = scratch + 4 * AES_BLOCK_LEN;
104 102
105 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 103 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
106 last_len = data_len % AES_BLOCK_LEN; 104 last_len = data_len % AES_BLOCK_LEN;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 44049733c4ea..12d52cec9515 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -330,6 +330,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
330static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 330static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
331{ 331{
332 struct ieee80211_sub_if_data *sdata = sta->sdata; 332 struct ieee80211_sub_if_data *sdata = sta->sdata;
333 struct timespec uptime;
333 334
334 sinfo->generation = sdata->local->sta_generation; 335 sinfo->generation = sdata->local->sta_generation;
335 336
@@ -342,7 +343,12 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
342 STATION_INFO_TX_FAILED | 343 STATION_INFO_TX_FAILED |
343 STATION_INFO_TX_BITRATE | 344 STATION_INFO_TX_BITRATE |
344 STATION_INFO_RX_BITRATE | 345 STATION_INFO_RX_BITRATE |
345 STATION_INFO_RX_DROP_MISC; 346 STATION_INFO_RX_DROP_MISC |
347 STATION_INFO_BSS_PARAM |
348 STATION_INFO_CONNECTED_TIME;
349
350 do_posix_clock_monotonic_gettime(&uptime);
351 sinfo->connected_time = uptime.tv_sec - sta->last_connected;
346 352
347 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); 353 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
348 sinfo->rx_bytes = sta->rx_bytes; 354 sinfo->rx_bytes = sta->rx_bytes;
@@ -389,6 +395,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
389 sinfo->plink_state = sta->plink_state; 395 sinfo->plink_state = sta->plink_state;
390#endif 396#endif
391 } 397 }
398
399 sinfo->bss_param.flags = 0;
400 if (sdata->vif.bss_conf.use_cts_prot)
401 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
402 if (sdata->vif.bss_conf.use_short_preamble)
403 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
404 if (sdata->vif.bss_conf.use_short_slot)
405 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
406 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
407 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
392} 408}
393 409
394 410
@@ -675,6 +691,12 @@ static void sta_apply_parameters(struct ieee80211_local *local,
675 if (set & BIT(NL80211_STA_FLAG_MFP)) 691 if (set & BIT(NL80211_STA_FLAG_MFP))
676 sta->flags |= WLAN_STA_MFP; 692 sta->flags |= WLAN_STA_MFP;
677 } 693 }
694
695 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
696 sta->flags &= ~WLAN_STA_AUTH;
697 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
698 sta->flags |= WLAN_STA_AUTH;
699 }
678 spin_unlock_irqrestore(&sta->flaglock, flags); 700 spin_unlock_irqrestore(&sta->flaglock, flags);
679 701
680 /* 702 /*
@@ -1023,26 +1045,26 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1023 u8 *new_ie; 1045 u8 *new_ie;
1024 const u8 *old_ie; 1046 const u8 *old_ie;
1025 1047
1026 /* first allocate the new vendor information element */ 1048 /* allocate information elements */
1027 new_ie = NULL; 1049 new_ie = NULL;
1028 old_ie = ifmsh->vendor_ie; 1050 old_ie = ifmsh->ie;
1029 1051
1030 ifmsh->vendor_ie_len = setup->vendor_ie_len; 1052 if (setup->ie_len) {
1031 if (setup->vendor_ie_len) { 1053 new_ie = kmemdup(setup->ie, setup->ie_len,
1032 new_ie = kmemdup(setup->vendor_ie, setup->vendor_ie_len,
1033 GFP_KERNEL); 1054 GFP_KERNEL);
1034 if (!new_ie) 1055 if (!new_ie)
1035 return -ENOMEM; 1056 return -ENOMEM;
1036 } 1057 }
1058 ifmsh->ie_len = setup->ie_len;
1059 ifmsh->ie = new_ie;
1060 kfree(old_ie);
1037 1061
1038 /* now copy the rest of the setup parameters */ 1062 /* now copy the rest of the setup parameters */
1039 ifmsh->mesh_id_len = setup->mesh_id_len; 1063 ifmsh->mesh_id_len = setup->mesh_id_len;
1040 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); 1064 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
1041 ifmsh->mesh_pp_id = setup->path_sel_proto; 1065 ifmsh->mesh_pp_id = setup->path_sel_proto;
1042 ifmsh->mesh_pm_id = setup->path_metric; 1066 ifmsh->mesh_pm_id = setup->path_metric;
1043 ifmsh->vendor_ie = new_ie; 1067 ifmsh->is_secure = setup->is_secure;
1044
1045 kfree(old_ie);
1046 1068
1047 return 0; 1069 return 0;
1048} 1070}
@@ -1611,16 +1633,13 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1611{ 1633{
1612 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1634 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1613 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1635 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1614 int i; 1636 int i, ret;
1615
1616 /*
1617 * This _could_ be supported by providing a hook for
1618 * drivers for this function, but at this point it
1619 * doesn't seem worth bothering.
1620 */
1621 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1622 return -EOPNOTSUPP;
1623 1637
1638 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
1639 ret = drv_set_bitrate_mask(local, sdata, mask);
1640 if (ret)
1641 return ret;
1642 }
1624 1643
1625 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 1644 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1626 sdata->rc_rateidx_mask[i] = mask->control[i].legacy; 1645 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 51f0d780dafa..0a602dbfdb2b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -37,7 +37,7 @@ int mac80211_format_buffer(char __user *userbuf, size_t count,
37 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 37 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
38} 38}
39 39
40#define DEBUGFS_READONLY_FILE(name, fmt, value...) \ 40#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...) \
41static ssize_t name## _read(struct file *file, char __user *userbuf, \ 41static ssize_t name## _read(struct file *file, char __user *userbuf, \
42 size_t count, loff_t *ppos) \ 42 size_t count, loff_t *ppos) \
43{ \ 43{ \
@@ -45,14 +45,19 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
45 \ 45 \
46 return mac80211_format_buffer(userbuf, count, ppos, \ 46 return mac80211_format_buffer(userbuf, count, ppos, \
47 fmt "\n", ##value); \ 47 fmt "\n", ##value); \
48} \ 48}
49 \ 49
50#define DEBUGFS_READONLY_FILE_OPS(name) \
50static const struct file_operations name## _ops = { \ 51static const struct file_operations name## _ops = { \
51 .read = name## _read, \ 52 .read = name## _read, \
52 .open = mac80211_open_file_generic, \ 53 .open = mac80211_open_file_generic, \
53 .llseek = generic_file_llseek, \ 54 .llseek = generic_file_llseek, \
54}; 55};
55 56
57#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
58 DEBUGFS_READONLY_FILE_FN(name, fmt, value) \
59 DEBUGFS_READONLY_FILE_OPS(name)
60
56#define DEBUGFS_ADD(name) \ 61#define DEBUGFS_ADD(name) \
57 debugfs_create_file(#name, 0400, phyd, local, &name## _ops); 62 debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
58 63
@@ -291,11 +296,70 @@ static ssize_t channel_type_read(struct file *file, char __user *user_buf,
291 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 296 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
292} 297}
293 298
294static const struct file_operations channel_type_ops = { 299static ssize_t hwflags_read(struct file *file, char __user *user_buf,
295 .read = channel_type_read, 300 size_t count, loff_t *ppos)
296 .open = mac80211_open_file_generic, 301{
297 .llseek = default_llseek, 302 struct ieee80211_local *local = file->private_data;
298}; 303 int mxln = 500;
304 ssize_t rv;
305 char *buf = kzalloc(mxln, GFP_KERNEL);
306 int sf = 0; /* how many written so far */
307
308 sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
309 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
310 sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
311 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
312 sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
313 if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
314 sf += snprintf(buf + sf, mxln - sf,
315 "HOST_BCAST_PS_BUFFERING\n");
316 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
317 sf += snprintf(buf + sf, mxln - sf,
318 "2GHZ_SHORT_SLOT_INCAPABLE\n");
319 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
320 sf += snprintf(buf + sf, mxln - sf,
321 "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
322 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
323 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
324 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
325 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
326 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
327 sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
328 if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
329 sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
330 if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
331 sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
332 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
333 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
334 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
335 sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
336 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
337 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
338 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
339 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
340 if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
341 sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
342 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
343 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
344 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
345 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
346 if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
347 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
348 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
349 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
350 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
351 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
352 if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
353 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
354 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
355 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
356 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
357 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
358
359 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
360 kfree(buf);
361 return rv;
362}
299 363
300static ssize_t queues_read(struct file *file, char __user *user_buf, 364static ssize_t queues_read(struct file *file, char __user *user_buf,
301 size_t count, loff_t *ppos) 365 size_t count, loff_t *ppos)
@@ -315,11 +379,9 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
315 return simple_read_from_buffer(user_buf, count, ppos, buf, res); 379 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
316} 380}
317 381
318static const struct file_operations queues_ops = { 382DEBUGFS_READONLY_FILE_OPS(hwflags);
319 .read = queues_read, 383DEBUGFS_READONLY_FILE_OPS(channel_type);
320 .open = mac80211_open_file_generic, 384DEBUGFS_READONLY_FILE_OPS(queues);
321 .llseek = default_llseek,
322};
323 385
324/* statistics stuff */ 386/* statistics stuff */
325 387
@@ -395,6 +457,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
395 DEBUGFS_ADD(uapsd_queues); 457 DEBUGFS_ADD(uapsd_queues);
396 DEBUGFS_ADD(uapsd_max_sp_len); 458 DEBUGFS_ADD(uapsd_max_sp_len);
397 DEBUGFS_ADD(channel_type); 459 DEBUGFS_ADD(channel_type);
460 DEBUGFS_ADD(hwflags);
398 DEBUGFS_ADD(user_power); 461 DEBUGFS_ADD(user_power);
399 DEBUGFS_ADD(power); 462 DEBUGFS_ADD(power);
400 463
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c04a1396cf8d..a01d2137fddc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -92,6 +92,31 @@ static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
92} 92}
93STA_OPS(inactive_ms); 93STA_OPS(inactive_ms);
94 94
95
96static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
97 size_t count, loff_t *ppos)
98{
99 struct sta_info *sta = file->private_data;
100 struct timespec uptime;
101 struct tm result;
102 long connected_time_secs;
103 char buf[100];
104 int res;
105 do_posix_clock_monotonic_gettime(&uptime);
106 connected_time_secs = uptime.tv_sec - sta->last_connected;
107 time_to_tm(connected_time_secs, 0, &result);
108 result.tm_year -= 70;
109 result.tm_mday -= 1;
110 res = scnprintf(buf, sizeof(buf),
111 "years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
112 result.tm_year, result.tm_mon, result.tm_mday,
113 result.tm_hour, result.tm_min, result.tm_sec);
114 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
115}
116STA_OPS(connected_time);
117
118
119
95static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, 120static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
96 size_t count, loff_t *ppos) 121 size_t count, loff_t *ppos)
97{ 122{
@@ -324,6 +349,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
324 DEBUGFS_ADD(flags); 349 DEBUGFS_ADD(flags);
325 DEBUGFS_ADD(num_ps_buf_frames); 350 DEBUGFS_ADD(num_ps_buf_frames);
326 DEBUGFS_ADD(inactive_ms); 351 DEBUGFS_ADD(inactive_ms);
352 DEBUGFS_ADD(connected_time);
327 DEBUGFS_ADD(last_seq_ctrl); 353 DEBUGFS_ADD(last_seq_ctrl);
328 DEBUGFS_ADD(agg_status); 354 DEBUGFS_ADD(agg_status);
329 DEBUGFS_ADD(dev); 355 DEBUGFS_ADD(dev);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c0d62bb0ea3..2ddb56e5b51f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -552,4 +552,35 @@ static inline void drv_get_ringparam(struct ieee80211_local *local,
552 trace_drv_return_void(local); 552 trace_drv_return_void(local);
553} 553}
554 554
555static inline bool drv_tx_frames_pending(struct ieee80211_local *local)
556{
557 bool ret = false;
558
559 might_sleep();
560
561 trace_drv_tx_frames_pending(local);
562 if (local->ops->tx_frames_pending)
563 ret = local->ops->tx_frames_pending(&local->hw);
564 trace_drv_return_bool(local, ret);
565
566 return ret;
567}
568
569static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
570 struct ieee80211_sub_if_data *sdata,
571 const struct cfg80211_bitrate_mask *mask)
572{
573 int ret = -EOPNOTSUPP;
574
575 might_sleep();
576
577 trace_drv_set_bitrate_mask(local, sdata, mask);
578 if (local->ops->set_bitrate_mask)
579 ret = local->ops->set_bitrate_mask(&local->hw,
580 &sdata->vif, mask);
581 trace_drv_return_int(local, ret);
582
583 return ret;
584}
585
555#endif /* __MAC80211_DRIVER_OPS */ 586#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 45aab80738e2..191e834ec46b 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -74,6 +74,21 @@ TRACE_EVENT(drv_return_int,
74 TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) 74 TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
75); 75);
76 76
77TRACE_EVENT(drv_return_bool,
78 TP_PROTO(struct ieee80211_local *local, bool ret),
79 TP_ARGS(local, ret),
80 TP_STRUCT__entry(
81 LOCAL_ENTRY
82 __field(bool, ret)
83 ),
84 TP_fast_assign(
85 LOCAL_ASSIGN;
86 __entry->ret = ret;
87 ),
88 TP_printk(LOCAL_PR_FMT " - %s", LOCAL_PR_ARG, (__entry->ret) ?
89 "true" : "false")
90);
91
77TRACE_EVENT(drv_return_u64, 92TRACE_EVENT(drv_return_u64,
78 TP_PROTO(struct ieee80211_local *local, u64 ret), 93 TP_PROTO(struct ieee80211_local *local, u64 ret),
79 TP_ARGS(local, ret), 94 TP_ARGS(local, ret),
@@ -964,11 +979,43 @@ TRACE_EVENT(drv_get_ringparam,
964 ) 979 )
965); 980);
966 981
982DEFINE_EVENT(local_only_evt, drv_tx_frames_pending,
983 TP_PROTO(struct ieee80211_local *local),
984 TP_ARGS(local)
985);
986
967DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait, 987DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
968 TP_PROTO(struct ieee80211_local *local), 988 TP_PROTO(struct ieee80211_local *local),
969 TP_ARGS(local) 989 TP_ARGS(local)
970); 990);
971 991
992TRACE_EVENT(drv_set_bitrate_mask,
993 TP_PROTO(struct ieee80211_local *local,
994 struct ieee80211_sub_if_data *sdata,
995 const struct cfg80211_bitrate_mask *mask),
996
997 TP_ARGS(local, sdata, mask),
998
999 TP_STRUCT__entry(
1000 LOCAL_ENTRY
1001 VIF_ENTRY
1002 __field(u32, legacy_2g)
1003 __field(u32, legacy_5g)
1004 ),
1005
1006 TP_fast_assign(
1007 LOCAL_ASSIGN;
1008 VIF_ASSIGN;
1009 __entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy;
1010 __entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy;
1011 ),
1012
1013 TP_printk(
1014 LOCAL_PR_FMT VIF_PR_FMT " 2G Mask:0x%x 5G Mask:0x%x",
1015 LOCAL_PR_ARG, VIF_PR_ARG, __entry->legacy_2g, __entry->legacy_5g
1016 )
1017);
1018
972/* 1019/*
973 * Tracing for API calls that drivers call. 1020 * Tracing for API calls that drivers call.
974 */ 1021 */
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3e81af1fce58..b81860c94698 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -40,7 +40,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
40 struct ieee80211_mgmt *mgmt, 40 struct ieee80211_mgmt *mgmt,
41 size_t len) 41 size_t len)
42{ 42{
43 u16 auth_alg, auth_transaction, status_code; 43 u16 auth_alg, auth_transaction;
44 44
45 lockdep_assert_held(&sdata->u.ibss.mtx); 45 lockdep_assert_held(&sdata->u.ibss.mtx);
46 46
@@ -49,7 +49,6 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
49 49
50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
52 status_code = le16_to_cpu(mgmt->u.auth.status_code);
53 52
54 /* 53 /*
55 * IEEE 802.11 standard does not require authentication in IBSS 54 * IEEE 802.11 standard does not require authentication in IBSS
@@ -527,8 +526,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
527static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 526static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
528{ 527{
529 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 528 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
530 struct ieee80211_local *local = sdata->local;
531 struct ieee80211_supported_band *sband;
532 u8 bssid[ETH_ALEN]; 529 u8 bssid[ETH_ALEN];
533 u16 capability; 530 u16 capability;
534 int i; 531 int i;
@@ -551,8 +548,6 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
551 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 548 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
552 sdata->name, bssid); 549 sdata->name, bssid);
553 550
554 sband = local->hw.wiphy->bands[ifibss->channel->band];
555
556 capability = WLAN_CAPABILITY_IBSS; 551 capability = WLAN_CAPABILITY_IBSS;
557 552
558 if (ifibss->privacy) 553 if (ifibss->privacy)
@@ -661,7 +656,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
661static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, 656static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
662 struct sk_buff *req) 657 struct sk_buff *req)
663{ 658{
664 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
665 struct ieee80211_mgmt *mgmt = (void *)req->data; 659 struct ieee80211_mgmt *mgmt = (void *)req->data;
666 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 660 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
667 struct ieee80211_local *local = sdata->local; 661 struct ieee80211_local *local = sdata->local;
@@ -685,7 +679,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
685 mgmt->bssid, tx_last_beacon); 679 mgmt->bssid, tx_last_beacon);
686#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 680#endif /* CONFIG_MAC80211_IBSS_DEBUG */
687 681
688 if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH)) 682 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
689 return; 683 return;
690 684
691 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 && 685 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c18396c248d7..027c0467d7a3 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -488,8 +488,9 @@ struct ieee80211_if_mesh {
488 struct mesh_config mshcfg; 488 struct mesh_config mshcfg;
489 u32 mesh_seqnum; 489 u32 mesh_seqnum;
490 bool accepting_plinks; 490 bool accepting_plinks;
491 const u8 *vendor_ie; 491 const u8 *ie;
492 u8 vendor_ie_len; 492 u8 ie_len;
493 bool is_secure;
493}; 494};
494 495
495#ifdef CONFIG_MAC80211_MESH 496#ifdef CONFIG_MAC80211_MESH
@@ -765,6 +766,9 @@ struct ieee80211_local {
765 766
766 int tx_headroom; /* required headroom for hardware/radiotap */ 767 int tx_headroom; /* required headroom for hardware/radiotap */
767 768
769 /* count for keys needing tailroom space allocation */
770 int crypto_tx_tailroom_needed_cnt;
771
768 /* Tasklet and skb queue to process calls from IRQ mode. All frames 772 /* Tasklet and skb queue to process calls from IRQ mode. All frames
769 * added to skb_queue will be processed, but frames in 773 * added to skb_queue will be processed, but frames in
770 * skb_queue_unreliable may be dropped if the total length of these 774 * skb_queue_unreliable may be dropped if the total length of these
@@ -809,8 +813,8 @@ struct ieee80211_local {
809 813
810 struct rate_control_ref *rate_ctrl; 814 struct rate_control_ref *rate_ctrl;
811 815
812 struct crypto_blkcipher *wep_tx_tfm; 816 struct crypto_cipher *wep_tx_tfm;
813 struct crypto_blkcipher *wep_rx_tfm; 817 struct crypto_cipher *wep_rx_tfm;
814 u32 wep_iv; 818 u32 wep_iv;
815 819
816 /* see iface.c */ 820 /* see iface.c */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4054399be907..80c29d626aa4 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1144,10 +1144,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1144 + IEEE80211_ENCRYPT_HEADROOM; 1144 + IEEE80211_ENCRYPT_HEADROOM;
1145 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 1145 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
1146 1146
1147 ret = dev_alloc_name(ndev, ndev->name);
1148 if (ret < 0)
1149 goto fail;
1150
1151 ieee80211_assign_perm_addr(local, ndev, type); 1147 ieee80211_assign_perm_addr(local, ndev, type);
1152 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); 1148 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
1153 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 1149 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af3c56482c80..b510721e3b3d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -101,6 +101,11 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
101 101
102 if (!ret) { 102 if (!ret) {
103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
104
105 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
106 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
107 key->local->crypto_tx_tailroom_needed_cnt--;
108
104 return 0; 109 return 0;
105 } 110 }
106 111
@@ -156,6 +161,10 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
156 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 161 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
157 162
158 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 163 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
164
165 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
166 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
167 key->local->crypto_tx_tailroom_needed_cnt++;
159} 168}
160 169
161void ieee80211_key_removed(struct ieee80211_key_conf *key_conf) 170void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -388,8 +397,10 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
388 ieee80211_aes_key_free(key->u.ccmp.tfm); 397 ieee80211_aes_key_free(key->u.ccmp.tfm);
389 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 398 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
390 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 399 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
391 if (key->local) 400 if (key->local) {
392 ieee80211_debugfs_key_remove(key); 401 ieee80211_debugfs_key_remove(key);
402 key->local->crypto_tx_tailroom_needed_cnt--;
403 }
393 404
394 kfree(key); 405 kfree(key);
395} 406}
@@ -451,6 +462,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
451 462
452 ieee80211_debugfs_key_add(key); 463 ieee80211_debugfs_key_add(key);
453 464
465 key->local->crypto_tx_tailroom_needed_cnt++;
466
454 ret = ieee80211_key_enable_hw_accel(key); 467 ret = ieee80211_key_enable_hw_accel(key);
455 468
456 mutex_unlock(&sdata->local->key_mtx); 469 mutex_unlock(&sdata->local->key_mtx);
@@ -492,8 +505,12 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
492 505
493 mutex_lock(&sdata->local->key_mtx); 506 mutex_lock(&sdata->local->key_mtx);
494 507
495 list_for_each_entry(key, &sdata->key_list, list) 508 sdata->local->crypto_tx_tailroom_needed_cnt = 0;
509
510 list_for_each_entry(key, &sdata->key_list, list) {
511 sdata->local->crypto_tx_tailroom_needed_cnt++;
496 ieee80211_key_enable_hw_accel(key); 512 ieee80211_key_enable_hw_accel(key);
513 }
497 514
498 mutex_unlock(&sdata->local->key_mtx); 515 mutex_unlock(&sdata->local->key_mtx);
499} 516}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 562d2984c482..61877662e8f8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -33,12 +33,6 @@
33#include "cfg.h" 33#include "cfg.h"
34#include "debugfs.h" 34#include "debugfs.h"
35 35
36
37static bool ieee80211_disable_40mhz_24ghz;
38module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
39MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
40 "Disable 40MHz support in the 2.4GHz band");
41
42static struct lock_class_key ieee80211_rx_skb_queue_class; 36static struct lock_class_key ieee80211_rx_skb_queue_class;
43 37
44void ieee80211_configure_filter(struct ieee80211_local *local) 38void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -545,7 +539,9 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
545 }, 539 },
546 [NL80211_IFTYPE_MESH_POINT] = { 540 [NL80211_IFTYPE_MESH_POINT] = {
547 .tx = 0xffff, 541 .tx = 0xffff,
548 .rx = BIT(IEEE80211_STYPE_ACTION >> 4), 542 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
543 BIT(IEEE80211_STYPE_AUTH >> 4) |
544 BIT(IEEE80211_STYPE_DEAUTH >> 4),
549 }, 545 },
550}; 546};
551 547
@@ -726,18 +722,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
726 } 722 }
727 channels += sband->n_channels; 723 channels += sband->n_channels;
728 724
729 /*
730 * Since ieee80211_disable_40mhz_24ghz is global, we can
731 * modify the sband's ht data even if the driver uses a
732 * global structure for that.
733 */
734 if (ieee80211_disable_40mhz_24ghz &&
735 band == IEEE80211_BAND_2GHZ &&
736 sband->ht_cap.ht_supported) {
737 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
738 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
739 }
740
741 if (max_bitrates < sband->n_bitrates) 725 if (max_bitrates < sband->n_bitrates)
742 max_bitrates = sband->n_bitrates; 726 max_bitrates = sband->n_bitrates;
743 supp_ht = supp_ht || sband->ht_cap.ht_supported; 727 supp_ht = supp_ht || sband->ht_cap.ht_supported;
@@ -760,6 +744,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
760 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); 744 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
761#endif 745#endif
762 746
747 /* if the underlying driver supports mesh, mac80211 will (at least)
748 * provide routing of mesh authentication frames to userspace */
749 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
750 local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
751
763 /* mac80211 supports control port protocol changing */ 752 /* mac80211 supports control port protocol changing */
764 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; 753 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
765 754
@@ -879,10 +868,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
879 868
880 local->dynamic_ps_forced_timeout = -1; 869 local->dynamic_ps_forced_timeout = -1;
881 870
882 result = sta_info_start(local);
883 if (result < 0)
884 goto fail_sta_info;
885
886 result = ieee80211_wep_init(local); 871 result = ieee80211_wep_init(local);
887 if (result < 0) 872 if (result < 0)
888 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 873 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
@@ -945,7 +930,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
945 rtnl_unlock(); 930 rtnl_unlock();
946 ieee80211_wep_free(local); 931 ieee80211_wep_free(local);
947 sta_info_stop(local); 932 sta_info_stop(local);
948 fail_sta_info:
949 destroy_workqueue(local->workqueue); 933 destroy_workqueue(local->workqueue);
950 fail_workqueue: 934 fail_workqueue:
951 wiphy_unregister(local->hw.wiphy); 935 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2a57cc02c618..c1299e249541 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,9 +279,9 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
280 *pos++ = 0x00; 280 *pos++ = 0x00;
281 281
282 if (sdata->u.mesh.vendor_ie) { 282 if (sdata->u.mesh.ie) {
283 int len = sdata->u.mesh.vendor_ie_len; 283 int len = sdata->u.mesh.ie_len;
284 const u8 *data = sdata->u.mesh.vendor_ie; 284 const u8 *data = sdata->u.mesh.ie;
285 if (skb_tailroom(skb) > len) 285 if (skb_tailroom(skb) > len)
286 memcpy(skb_put(skb, len), data, len); 286 memcpy(skb_put(skb, len), data, len);
287 } 287 }
@@ -573,6 +573,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
573 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 573 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
574 &elems); 574 &elems);
575 575
576 /* ignore beacons from secure mesh peers if our security is off */
577 if (elems.rsn_len && !sdata->u.mesh.is_secure)
578 return;
579
576 if (elems.ds_params && elems.ds_params_len == 1) 580 if (elems.ds_params && elems.ds_params_len == 1)
577 freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); 581 freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
578 else 582 else
@@ -586,9 +590,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
586 if (elems.mesh_id && elems.mesh_config && 590 if (elems.mesh_id && elems.mesh_config &&
587 mesh_matches_local(&elems, sdata)) { 591 mesh_matches_local(&elems, sdata)) {
588 supp_rates = ieee80211_sta_get_rates(local, &elems, band); 592 supp_rates = ieee80211_sta_get_rates(local, &elems, band);
589 593 mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
590 mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
591 mesh_peer_accepts_plinks(&elems));
592 } 594 }
593} 595}
594 596
@@ -611,12 +613,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
611 struct sk_buff *skb) 613 struct sk_buff *skb)
612{ 614{
613 struct ieee80211_rx_status *rx_status; 615 struct ieee80211_rx_status *rx_status;
614 struct ieee80211_if_mesh *ifmsh;
615 struct ieee80211_mgmt *mgmt; 616 struct ieee80211_mgmt *mgmt;
616 u16 stype; 617 u16 stype;
617 618
618 ifmsh = &sdata->u.mesh;
619
620 rx_status = IEEE80211_SKB_RXCB(skb); 619 rx_status = IEEE80211_SKB_RXCB(skb);
621 mgmt = (struct ieee80211_mgmt *) skb->data; 620 mgmt = (struct ieee80211_mgmt *) skb->data;
622 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; 621 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index b99e230fe31c..10acf1cc8082 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -226,7 +226,8 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
226int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 226int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 227/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u32 rates, 228void mesh_neighbour_update(u8 *hw_addr, u32 rates,
229 struct ieee80211_sub_if_data *sdata, bool add); 229 struct ieee80211_sub_if_data *sdata,
230 struct ieee802_11_elems *ie);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 231bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
232void mesh_plink_broken(struct sta_info *sta); 233void mesh_plink_broken(struct sta_info *sta);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5bf64d7112b3..e57f2e728cfe 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -633,7 +633,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
633 struct mesh_path *mpath; 633 struct mesh_path *mpath;
634 u8 ttl; 634 u8 ttl;
635 u8 *ta, *target_addr; 635 u8 *ta, *target_addr;
636 u8 target_flags;
637 u32 target_sn; 636 u32 target_sn;
638 u16 target_rcode; 637 u16 target_rcode;
639 638
@@ -644,7 +643,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
644 return; 643 return;
645 } 644 }
646 ttl--; 645 ttl--;
647 target_flags = PERR_IE_TARGET_FLAGS(perr_elem);
648 target_addr = PERR_IE_TARGET_ADDR(perr_elem); 646 target_addr = PERR_IE_TARGET_ADDR(perr_elem);
649 target_sn = PERR_IE_TARGET_SN(perr_elem); 647 target_sn = PERR_IE_TARGET_SN(perr_elem);
650 target_rcode = PERR_IE_TARGET_RCODE(perr_elem); 648 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
@@ -675,12 +673,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
675{ 673{
676 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 674 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
677 struct mesh_path *mpath; 675 struct mesh_path *mpath;
678 u8 *ta;
679 u8 ttl, flags, hopcount; 676 u8 ttl, flags, hopcount;
680 u8 *orig_addr; 677 u8 *orig_addr;
681 u32 orig_sn, metric; 678 u32 orig_sn, metric;
682 679
683 ta = mgmt->sa;
684 ttl = rann->rann_ttl; 680 ttl = rann->rann_ttl;
685 if (ttl <= 1) { 681 if (ttl <= 1) {
686 ifmsh->mshstats.dropped_frames_ttl++; 682 ifmsh->mshstats.dropped_frames_ttl++;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 336ca9d0c5c4..35c715adaae2 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -65,42 +65,37 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
65 __mesh_table_free(tbl); 65 __mesh_table_free(tbl);
66} 66}
67 67
68static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 68static int mesh_table_grow(struct mesh_table *oldtbl,
69 struct mesh_table *newtbl)
69{ 70{
70 struct mesh_table *newtbl;
71 struct hlist_head *oldhash; 71 struct hlist_head *oldhash;
72 struct hlist_node *p, *q; 72 struct hlist_node *p, *q;
73 int i; 73 int i;
74 74
75 if (atomic_read(&tbl->entries) 75 if (atomic_read(&oldtbl->entries)
76 < tbl->mean_chain_len * (tbl->hash_mask + 1)) 76 < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
77 goto endgrow; 77 return -EAGAIN;
78 78
79 newtbl = mesh_table_alloc(tbl->size_order + 1);
80 if (!newtbl)
81 goto endgrow;
82 79
83 newtbl->free_node = tbl->free_node; 80 newtbl->free_node = oldtbl->free_node;
84 newtbl->mean_chain_len = tbl->mean_chain_len; 81 newtbl->mean_chain_len = oldtbl->mean_chain_len;
85 newtbl->copy_node = tbl->copy_node; 82 newtbl->copy_node = oldtbl->copy_node;
86 atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); 83 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
87 84
88 oldhash = tbl->hash_buckets; 85 oldhash = oldtbl->hash_buckets;
89 for (i = 0; i <= tbl->hash_mask; i++) 86 for (i = 0; i <= oldtbl->hash_mask; i++)
90 hlist_for_each(p, &oldhash[i]) 87 hlist_for_each(p, &oldhash[i])
91 if (tbl->copy_node(p, newtbl) < 0) 88 if (oldtbl->copy_node(p, newtbl) < 0)
92 goto errcopy; 89 goto errcopy;
93 90
94 return newtbl; 91 return 0;
95 92
96errcopy: 93errcopy:
97 for (i = 0; i <= newtbl->hash_mask; i++) { 94 for (i = 0; i <= newtbl->hash_mask; i++) {
98 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 95 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
99 tbl->free_node(p, 0); 96 oldtbl->free_node(p, 0);
100 } 97 }
101 __mesh_table_free(newtbl); 98 return -ENOMEM;
102endgrow:
103 return NULL;
104} 99}
105 100
106 101
@@ -334,10 +329,13 @@ void mesh_mpath_table_grow(void)
334{ 329{
335 struct mesh_table *oldtbl, *newtbl; 330 struct mesh_table *oldtbl, *newtbl;
336 331
332 newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
333 if (!newtbl)
334 return;
337 write_lock(&pathtbl_resize_lock); 335 write_lock(&pathtbl_resize_lock);
338 oldtbl = mesh_paths; 336 oldtbl = mesh_paths;
339 newtbl = mesh_table_grow(mesh_paths); 337 if (mesh_table_grow(mesh_paths, newtbl) < 0) {
340 if (!newtbl) { 338 __mesh_table_free(newtbl);
341 write_unlock(&pathtbl_resize_lock); 339 write_unlock(&pathtbl_resize_lock);
342 return; 340 return;
343 } 341 }
@@ -352,10 +350,13 @@ void mesh_mpp_table_grow(void)
352{ 350{
353 struct mesh_table *oldtbl, *newtbl; 351 struct mesh_table *oldtbl, *newtbl;
354 352
353 newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
354 if (!newtbl)
355 return;
355 write_lock(&pathtbl_resize_lock); 356 write_lock(&pathtbl_resize_lock);
356 oldtbl = mpp_paths; 357 oldtbl = mpp_paths;
357 newtbl = mesh_table_grow(mpp_paths); 358 if (mesh_table_grow(mpp_paths, newtbl) < 0) {
358 if (!newtbl) { 359 __mesh_table_free(newtbl);
359 write_unlock(&pathtbl_resize_lock); 360 write_unlock(&pathtbl_resize_lock);
360 return; 361 return;
361 } 362 }
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 44b53931ba5e..84e5b056af02 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags = WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 rate_control_rate_init(sta); 110 rate_control_rate_init(sta);
111 111
@@ -161,7 +161,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
161 __le16 reason) { 161 __le16 reason) {
162 struct ieee80211_local *local = sdata->local; 162 struct ieee80211_local *local = sdata->local;
163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + 163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
164 sdata->u.mesh.vendor_ie_len); 164 sdata->u.mesh.ie_len);
165 struct ieee80211_mgmt *mgmt; 165 struct ieee80211_mgmt *mgmt;
166 bool include_plid = false; 166 bool include_plid = false;
167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; 167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
@@ -237,8 +237,9 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
237 return 0; 237 return 0;
238} 238}
239 239
240void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata, 240void mesh_neighbour_update(u8 *hw_addr, u32 rates,
241 bool peer_accepting_plinks) 241 struct ieee80211_sub_if_data *sdata,
242 struct ieee802_11_elems *elems)
242{ 243{
243 struct ieee80211_local *local = sdata->local; 244 struct ieee80211_local *local = sdata->local;
244 struct sta_info *sta; 245 struct sta_info *sta;
@@ -248,8 +249,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
248 sta = sta_info_get(sdata, hw_addr); 249 sta = sta_info_get(sdata, hw_addr);
249 if (!sta) { 250 if (!sta) {
250 rcu_read_unlock(); 251 rcu_read_unlock();
251 252 /* Userspace handles peer allocation when security is enabled
252 sta = mesh_plink_alloc(sdata, hw_addr, rates); 253 * */
254 if (sdata->u.mesh.is_secure)
255 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
256 elems->ie_start, elems->total_len,
257 GFP_KERNEL);
258 else
259 sta = mesh_plink_alloc(sdata, hw_addr, rates);
253 if (!sta) 260 if (!sta)
254 return; 261 return;
255 if (sta_info_insert_rcu(sta)) { 262 if (sta_info_insert_rcu(sta)) {
@@ -260,7 +267,8 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
260 267
261 sta->last_rx = jiffies; 268 sta->last_rx = jiffies;
262 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 269 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
263 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && 270 if (mesh_peer_accepts_plinks(elems) &&
271 sta->plink_state == PLINK_LISTEN &&
264 sdata->u.mesh.accepting_plinks && 272 sdata->u.mesh.accepting_plinks &&
265 sdata->u.mesh.mshcfg.auto_open_plinks) 273 sdata->u.mesh.mshcfg.auto_open_plinks)
266 mesh_plink_open(sta); 274 mesh_plink_open(sta);
@@ -372,6 +380,9 @@ int mesh_plink_open(struct sta_info *sta)
372 __le16 llid; 380 __le16 llid;
373 struct ieee80211_sub_if_data *sdata = sta->sdata; 381 struct ieee80211_sub_if_data *sdata = sta->sdata;
374 382
383 if (!test_sta_flags(sta, WLAN_STA_AUTH))
384 return -EPERM;
385
375 spin_lock_bh(&sta->lock); 386 spin_lock_bh(&sta->lock);
376 get_random_bytes(&llid, 2); 387 get_random_bytes(&llid, 2);
377 sta->llid = llid; 388 sta->llid = llid;
@@ -449,6 +460,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
449 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 460 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
450 return; 461 return;
451 } 462 }
463 if (elems.rsn_len && !sdata->u.mesh.is_secure) {
464 mpl_dbg("Mesh plink: can't establish link with secure peer\n");
465 return;
466 }
452 467
453 ftype = mgmt->u.action.u.plink_action.action_code; 468 ftype = mgmt->u.action.u.plink_action.action_code;
454 ie_len = elems.peer_link_len; 469 ie_len = elems.peer_link_len;
@@ -480,6 +495,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
480 return; 495 return;
481 } 496 }
482 497
498 if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
499 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
500 rcu_read_unlock();
501 return;
502 }
503
483 if (sta && sta->plink_state == PLINK_BLOCKED) { 504 if (sta && sta->plink_state == PLINK_BLOCKED) {
484 rcu_read_unlock(); 505 rcu_read_unlock();
485 return; 506 return;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 64d92d5a7f40..a41f234bd486 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -90,20 +90,11 @@ enum rx_mgmt_action {
90 /* no action required */ 90 /* no action required */
91 RX_MGMT_NONE, 91 RX_MGMT_NONE,
92 92
93 /* caller must call cfg80211_send_rx_auth() */
94 RX_MGMT_CFG80211_AUTH,
95
96 /* caller must call cfg80211_send_rx_assoc() */
97 RX_MGMT_CFG80211_ASSOC,
98
99 /* caller must call cfg80211_send_deauth() */ 93 /* caller must call cfg80211_send_deauth() */
100 RX_MGMT_CFG80211_DEAUTH, 94 RX_MGMT_CFG80211_DEAUTH,
101 95
102 /* caller must call cfg80211_send_disassoc() */ 96 /* caller must call cfg80211_send_disassoc() */
103 RX_MGMT_CFG80211_DISASSOC, 97 RX_MGMT_CFG80211_DISASSOC,
104
105 /* caller must tell cfg80211 about internal error */
106 RX_MGMT_CFG80211_ASSOC_ERROR,
107}; 98};
108 99
109/* utils */ 100/* utils */
@@ -770,15 +761,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
770 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 761 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
771 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) { 762 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
772 netif_tx_stop_all_queues(sdata->dev); 763 netif_tx_stop_all_queues(sdata->dev);
773 /*
774 * Flush all the frames queued in the driver before
775 * going to power save
776 */
777 drv_flush(local, false);
778 ieee80211_send_nullfunc(local, sdata, 1);
779 764
780 /* Flush once again to get the tx status of nullfunc frame */ 765 if (drv_tx_frames_pending(local))
781 drv_flush(local, false); 766 mod_timer(&local->dynamic_ps_timer, jiffies +
767 msecs_to_jiffies(
768 local->hw.conf.dynamic_ps_timeout));
769 else {
770 ieee80211_send_nullfunc(local, sdata, 1);
771 /* Flush to get the tx status of nullfunc frame */
772 drv_flush(local, false);
773 }
782 } 774 }
783 775
784 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && 776 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e37355193ed1..042461710880 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -14,12 +14,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
14 14
15 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
16 16
17 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
18 mutex_lock(&local->sta_mtx);
19 list_for_each_entry(sta, &local->sta_list, list) {
20 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
21 ieee80211_sta_tear_down_BA_sessions(sta, true);
22 }
23 mutex_unlock(&local->sta_mtx);
24 }
25
17 ieee80211_stop_queues_by_reason(hw, 26 ieee80211_stop_queues_by_reason(hw,
18 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 27 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
19 28
20 /* flush out all packets */ 29 /* flush out all packets */
21 synchronize_net(); 30 synchronize_net();
22 31
32 drv_flush(local, false);
33
23 local->quiescing = true; 34 local->quiescing = true;
24 /* make quiescing visible to timers everywhere */ 35 /* make quiescing visible to timers everywhere */
25 mb(); 36 mb();
@@ -43,11 +54,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
43 /* tear down aggregation sessions and remove STAs */ 54 /* tear down aggregation sessions and remove STAs */
44 mutex_lock(&local->sta_mtx); 55 mutex_lock(&local->sta_mtx);
45 list_for_each_entry(sta, &local->sta_list, list) { 56 list_for_each_entry(sta, &local->sta_list, list) {
46 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
47 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
48 ieee80211_sta_tear_down_BA_sessions(sta, true);
49 }
50
51 if (sta->uploaded) { 57 if (sta->uploaded) {
52 sdata = sta->sdata; 58 sdata = sta->sdata;
53 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 59 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c5d4530d8284..13a6697651ad 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,7 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 if (status->flag & RX_FLAG_HT) { 143 if (status->flag & RX_FLAG_HT) {
144 /* 144 /*
145 * MCS information is a separate field in radiotap, 145 * MCS information is a separate field in radiotap,
146 * added below. 146 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0.
147 */ 148 */
148 *pos = 0; 149 *pos = 0;
149 } else { 150 } else {
@@ -502,7 +503,8 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
502 503
503 if (ieee80211_is_probe_req(hdr->frame_control) || 504 if (ieee80211_is_probe_req(hdr->frame_control) ||
504 ieee80211_is_probe_resp(hdr->frame_control) || 505 ieee80211_is_probe_resp(hdr->frame_control) ||
505 ieee80211_is_beacon(hdr->frame_control)) 506 ieee80211_is_beacon(hdr->frame_control) ||
507 ieee80211_is_auth(hdr->frame_control))
506 return RX_CONTINUE; 508 return RX_CONTINUE;
507 509
508 return RX_DROP_MONITOR; 510 return RX_DROP_MONITOR;
@@ -650,7 +652,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
650 set_release_timer: 652 set_release_timer:
651 653
652 mod_timer(&tid_agg_rx->reorder_timer, 654 mod_timer(&tid_agg_rx->reorder_timer,
653 tid_agg_rx->reorder_time[j] + 655 tid_agg_rx->reorder_time[j] + 1 +
654 HT_RX_REORDER_BUF_TIMEOUT); 656 HT_RX_REORDER_BUF_TIMEOUT);
655 } else { 657 } else {
656 del_timer(&tid_agg_rx->reorder_timer); 658 del_timer(&tid_agg_rx->reorder_timer);
@@ -707,6 +709,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
707 /* 709 /*
708 * If the current MPDU is in the right order and nothing else 710 * If the current MPDU is in the right order and nothing else
709 * is stored we can process it directly, no need to buffer it. 711 * is stored we can process it directly, no need to buffer it.
712 * If it is first but there's something stored, we may be able
713 * to release frames after this one.
710 */ 714 */
711 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 715 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
712 tid_agg_rx->stored_mpdu_num == 0) { 716 tid_agg_rx->stored_mpdu_num == 0) {
@@ -1583,7 +1587,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1583} 1587}
1584 1588
1585static int 1589static int
1586__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1590__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1587{ 1591{
1588 struct ieee80211_sub_if_data *sdata = rx->sdata; 1592 struct ieee80211_sub_if_data *sdata = rx->sdata;
1589 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1593 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
@@ -1591,6 +1595,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1591 struct ethhdr *ehdr; 1595 struct ethhdr *ehdr;
1592 int ret; 1596 int ret;
1593 1597
1598 *port_control = false;
1594 if (ieee80211_has_a4(hdr->frame_control) && 1599 if (ieee80211_has_a4(hdr->frame_control) &&
1595 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1600 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1596 return -1; 1601 return -1;
@@ -1609,11 +1614,13 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1609 return -1; 1614 return -1;
1610 1615
1611 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1616 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1612 if (ret < 0 || !check_port_control) 1617 if (ret < 0)
1613 return ret; 1618 return ret;
1614 1619
1615 ehdr = (struct ethhdr *) rx->skb->data; 1620 ehdr = (struct ethhdr *) rx->skb->data;
1616 if (ehdr->h_proto != rx->sdata->control_port_protocol) 1621 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1622 *port_control = true;
1623 else if (check_port_control)
1617 return -1; 1624 return -1;
1618 1625
1619 return 0; 1626 return 0;
@@ -1914,6 +1921,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1914 struct net_device *dev = sdata->dev; 1921 struct net_device *dev = sdata->dev;
1915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1922 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1916 __le16 fc = hdr->frame_control; 1923 __le16 fc = hdr->frame_control;
1924 bool port_control;
1917 int err; 1925 int err;
1918 1926
1919 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1927 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
@@ -1930,13 +1938,21 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1930 sdata->vif.type == NL80211_IFTYPE_AP) 1938 sdata->vif.type == NL80211_IFTYPE_AP)
1931 return RX_DROP_MONITOR; 1939 return RX_DROP_MONITOR;
1932 1940
1933 err = __ieee80211_data_to_8023(rx); 1941 err = __ieee80211_data_to_8023(rx, &port_control);
1934 if (unlikely(err)) 1942 if (unlikely(err))
1935 return RX_DROP_UNUSABLE; 1943 return RX_DROP_UNUSABLE;
1936 1944
1937 if (!ieee80211_frame_allowed(rx, fc)) 1945 if (!ieee80211_frame_allowed(rx, fc))
1938 return RX_DROP_MONITOR; 1946 return RX_DROP_MONITOR;
1939 1947
1948 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1949 unlikely(port_control) && sdata->bss) {
1950 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1951 u.ap);
1952 dev = sdata->dev;
1953 rx->sdata = sdata;
1954 }
1955
1940 rx->skb->dev = dev; 1956 rx->skb->dev = dev;
1941 1957
1942 dev->stats.rx_packets++; 1958 dev->stats.rx_packets++;
@@ -2352,47 +2368,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2352 return RX_QUEUED; 2368 return RX_QUEUED;
2353} 2369}
2354 2370
2355static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2356 struct ieee80211_rx_data *rx)
2357{
2358 int keyidx;
2359 unsigned int hdrlen;
2360
2361 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2362 if (rx->skb->len >= hdrlen + 4)
2363 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2364 else
2365 keyidx = -1;
2366
2367 if (!rx->sta) {
2368 /*
2369 * Some hardware seem to generate incorrect Michael MIC
2370 * reports; ignore them to avoid triggering countermeasures.
2371 */
2372 return;
2373 }
2374
2375 if (!ieee80211_has_protected(hdr->frame_control))
2376 return;
2377
2378 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2379 /*
2380 * APs with pairwise keys should never receive Michael MIC
2381 * errors for non-zero keyidx because these are reserved for
2382 * group keys and only the AP is sending real multicast
2383 * frames in the BSS.
2384 */
2385 return;
2386 }
2387
2388 if (!ieee80211_is_data(hdr->frame_control) &&
2389 !ieee80211_is_auth(hdr->frame_control))
2390 return;
2391
2392 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2393 GFP_ATOMIC);
2394}
2395
2396/* TODO: use IEEE80211_RX_FRAGMENTED */ 2371/* TODO: use IEEE80211_RX_FRAGMENTED */
2397static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2372static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2398 struct ieee80211_rate *rate) 2373 struct ieee80211_rate *rate)
@@ -2736,12 +2711,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2736 if (!prepares) 2711 if (!prepares)
2737 return false; 2712 return false;
2738 2713
2739 if (status->flag & RX_FLAG_MMIC_ERROR) {
2740 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2741 ieee80211_rx_michael_mic_report(hdr, rx);
2742 return false;
2743 }
2744
2745 if (!consume) { 2714 if (!consume) {
2746 skb = skb_copy(skb, GFP_ATOMIC); 2715 skb = skb_copy(skb, GFP_ATOMIC);
2747 if (!skb) { 2716 if (!skb) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 13e8c30adf01..d9e6e81ff6b2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -228,6 +228,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
228{ 228{
229 struct ieee80211_local *local = sdata->local; 229 struct ieee80211_local *local = sdata->local;
230 struct sta_info *sta; 230 struct sta_info *sta;
231 struct timespec uptime;
231 int i; 232 int i;
232 233
233 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); 234 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
@@ -245,6 +246,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
245 sta->sdata = sdata; 246 sta->sdata = sdata;
246 sta->last_rx = jiffies; 247 sta->last_rx = jiffies;
247 248
249 do_posix_clock_monotonic_gettime(&uptime);
250 sta->last_connected = uptime.tv_sec;
248 ewma_init(&sta->avg_signal, 1024, 8); 251 ewma_init(&sta->avg_signal, 1024, 8);
249 252
250 if (sta_prepare_rate_control(local, sta, gfp)) { 253 if (sta_prepare_rate_control(local, sta, gfp)) {
@@ -584,7 +587,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
584{ 587{
585 unsigned long flags; 588 unsigned long flags;
586 struct sk_buff *skb; 589 struct sk_buff *skb;
587 struct ieee80211_sub_if_data *sdata;
588 590
589 if (skb_queue_empty(&sta->ps_tx_buf)) 591 if (skb_queue_empty(&sta->ps_tx_buf))
590 return false; 592 return false;
@@ -601,7 +603,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
601 if (!skb) 603 if (!skb)
602 break; 604 break;
603 605
604 sdata = sta->sdata;
605 local->total_ps_buffered--; 606 local->total_ps_buffered--;
606#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 607#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
607 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n", 608 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
@@ -609,7 +610,8 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
609#endif 610#endif
610 dev_kfree_skb(skb); 611 dev_kfree_skb(skb);
611 612
612 if (skb_queue_empty(&sta->ps_tx_buf)) 613 if (skb_queue_empty(&sta->ps_tx_buf) &&
614 !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
613 sta_info_clear_tim_bit(sta); 615 sta_info_clear_tim_bit(sta);
614 } 616 }
615 617
@@ -698,6 +700,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
698#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 700#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
699 cancel_work_sync(&sta->drv_unblock_wk); 701 cancel_work_sync(&sta->drv_unblock_wk);
700 702
703 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
704
701 rate_control_remove_sta_debugfs(sta); 705 rate_control_remove_sta_debugfs(sta);
702 ieee80211_sta_debugfs_remove(sta); 706 ieee80211_sta_debugfs_remove(sta);
703 707
@@ -766,9 +770,8 @@ static void sta_info_cleanup(unsigned long data)
766 if (!timer_needed) 770 if (!timer_needed)
767 return; 771 return;
768 772
769 local->sta_cleanup.expires = 773 mod_timer(&local->sta_cleanup,
770 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 774 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
771 add_timer(&local->sta_cleanup);
772} 775}
773 776
774void sta_info_init(struct ieee80211_local *local) 777void sta_info_init(struct ieee80211_local *local)
@@ -781,14 +784,6 @@ void sta_info_init(struct ieee80211_local *local)
781 784
782 setup_timer(&local->sta_cleanup, sta_info_cleanup, 785 setup_timer(&local->sta_cleanup, sta_info_cleanup,
783 (unsigned long)local); 786 (unsigned long)local);
784 local->sta_cleanup.expires =
785 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
786}
787
788int sta_info_start(struct ieee80211_local *local)
789{
790 add_timer(&local->sta_cleanup);
791 return 0;
792} 787}
793 788
794void sta_info_stop(struct ieee80211_local *local) 789void sta_info_stop(struct ieee80211_local *local)
@@ -900,6 +895,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
900 struct ieee80211_local *local = sdata->local; 895 struct ieee80211_local *local = sdata->local;
901 int sent, buffered; 896 int sent, buffered;
902 897
898 clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
903 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 899 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
904 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 900 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
905 901
@@ -992,3 +988,12 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
992 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 988 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
993} 989}
994EXPORT_SYMBOL(ieee80211_sta_block_awake); 990EXPORT_SYMBOL(ieee80211_sta_block_awake);
991
992void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
993{
994 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
995
996 set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
997 sta_info_set_tim_bit(sta);
998}
999EXPORT_SYMBOL(ieee80211_sta_set_tim);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b2f95966c7f4..aa0adcbf3a93 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -43,6 +43,8 @@
43 * be in the queues 43 * be in the queues
44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
45 * station in power-save mode, reply when the driver unblocks. 45 * station in power-save mode, reply when the driver unblocks.
46 * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
47 * buffers. Automatically cleared on station wake-up.
46 */ 48 */
47enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
48 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -58,6 +60,7 @@ enum ieee80211_sta_info_flags {
58 WLAN_STA_BLOCK_BA = 1<<11, 60 WLAN_STA_BLOCK_BA = 1<<11,
59 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
60 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_PS_DRIVER_BUF = 1<<14,
61}; 64};
62 65
63#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -226,6 +229,7 @@ enum plink_state {
226 * @rx_bytes: Number of bytes received from this STA 229 * @rx_bytes: Number of bytes received from this STA
227 * @wep_weak_iv_count: number of weak WEP IVs received from this station 230 * @wep_weak_iv_count: number of weak WEP IVs received from this station
228 * @last_rx: time (in jiffies) when last frame was received from this STA 231 * @last_rx: time (in jiffies) when last frame was received from this STA
232 * @last_connected: time (in seconds) when a station got connected
229 * @num_duplicates: number of duplicate frames received from this STA 233 * @num_duplicates: number of duplicate frames received from this STA
230 * @rx_fragments: number of received MPDUs 234 * @rx_fragments: number of received MPDUs
231 * @rx_dropped: number of dropped MPDUs from this STA 235 * @rx_dropped: number of dropped MPDUs from this STA
@@ -295,6 +299,7 @@ struct sta_info {
295 unsigned long rx_packets, rx_bytes; 299 unsigned long rx_packets, rx_bytes;
296 unsigned long wep_weak_iv_count; 300 unsigned long wep_weak_iv_count;
297 unsigned long last_rx; 301 unsigned long last_rx;
302 long last_connected;
298 unsigned long num_duplicates; 303 unsigned long num_duplicates;
299 unsigned long rx_fragments; 304 unsigned long rx_fragments;
300 unsigned long rx_dropped; 305 unsigned long rx_dropped;
@@ -497,7 +502,6 @@ void sta_info_set_tim_bit(struct sta_info *sta);
497void sta_info_clear_tim_bit(struct sta_info *sta); 502void sta_info_clear_tim_bit(struct sta_info *sta);
498 503
499void sta_info_init(struct ieee80211_local *local); 504void sta_info_init(struct ieee80211_local *local);
500int sta_info_start(struct ieee80211_local *local);
501void sta_info_stop(struct ieee80211_local *local); 505void sta_info_stop(struct ieee80211_local *local);
502int sta_info_flush(struct ieee80211_local *local, 506int sta_info_flush(struct ieee80211_local *local,
503 struct ieee80211_sub_if_data *sdata); 507 struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b936dd29e92b..1658efaa2e8e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -189,16 +189,19 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
189 bool acked; 189 bool acked;
190 190
191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
192 /* the HW cannot have attempted that rate */ 192 if (info->status.rates[i].idx < 0) {
193 if (i >= hw->max_report_rates) { 193 break;
194 } else if (i >= hw->max_report_rates) {
195 /* the HW cannot have attempted that rate */
194 info->status.rates[i].idx = -1; 196 info->status.rates[i].idx = -1;
195 info->status.rates[i].count = 0; 197 info->status.rates[i].count = 0;
196 } else if (info->status.rates[i].idx >= 0) { 198 break;
197 rates_idx = i;
198 } 199 }
199 200
200 retry_count += info->status.rates[i].count; 201 retry_count += info->status.rates[i].count;
201 } 202 }
203 rates_idx = i - 1;
204
202 if (retry_count < 0) 205 if (retry_count < 0)
203 retry_count = 0; 206 retry_count = 0;
204 207
@@ -443,3 +446,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
443 dev_kfree_skb(skb); 446 dev_kfree_skb(skb);
444} 447}
445EXPORT_SYMBOL(ieee80211_tx_status); 448EXPORT_SYMBOL(ieee80211_tx_status);
449
450void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
451{
452 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
453 cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
454 num_packets, GFP_ATOMIC);
455}
456EXPORT_SYMBOL(ieee80211_report_low_ack);
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index e840c9cd46db..757e4eb2baf7 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
202 * @payload_len is the length of payload (_not_ including IV/ICV length). 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
203 * @ta is the transmitter addresses. 203 * @ta is the transmitter addresses.
204 */ 204 */
205int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
206 struct ieee80211_key *key, 206 struct ieee80211_key *key,
207 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
208{ 208{
@@ -223,7 +223,7 @@ int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
223 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 223 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
225 * length of payload, including IV, Ext. IV, MIC, ICV. */ 225 * length of payload, including IV, Ext. IV, MIC, ICV. */
226int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 226int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
227 struct ieee80211_key *key, 227 struct ieee80211_key *key,
228 u8 *payload, size_t payload_len, u8 *ta, 228 u8 *payload, size_t payload_len, u8 *ta,
229 u8 *ra, int only_iv, int queue, 229 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 7e83dee976fa..1cab9c86978f 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -15,7 +15,7 @@
15 15
16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 17
18int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
19 struct ieee80211_key *key, 19 struct ieee80211_key *key,
20 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
21enum { 21enum {
@@ -24,7 +24,7 @@ enum {
24 TKIP_DECRYPT_INVALID_KEYIDX = -2, 24 TKIP_DECRYPT_INVALID_KEYIDX = -2,
25 TKIP_DECRYPT_REPLAY = -3, 25 TKIP_DECRYPT_REPLAY = -3,
26}; 26};
27int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 27int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
28 struct ieee80211_key *key, 28 struct ieee80211_key *key,
29 u8 *payload, size_t payload_len, u8 *ta, 29 u8 *payload, size_t payload_len, u8 *ta,
30 u8 *ra, int only_iv, int queue, 30 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ce4596ed1268..e3e3aa173af0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1036,14 +1036,11 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1036 struct ieee80211_radiotap_iterator iterator; 1036 struct ieee80211_radiotap_iterator iterator;
1037 struct ieee80211_radiotap_header *rthdr = 1037 struct ieee80211_radiotap_header *rthdr =
1038 (struct ieee80211_radiotap_header *) skb->data; 1038 (struct ieee80211_radiotap_header *) skb->data;
1039 struct ieee80211_supported_band *sband;
1040 bool hw_frag; 1039 bool hw_frag;
1041 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1040 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1042 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, 1041 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1043 NULL); 1042 NULL);
1044 1043
1045 sband = tx->local->hw.wiphy->bands[tx->channel->band];
1046
1047 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1044 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1048 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 1045 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1049 1046
@@ -1442,11 +1439,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1442 struct ieee80211_tx_data tx; 1439 struct ieee80211_tx_data tx;
1443 ieee80211_tx_result res_prepare; 1440 ieee80211_tx_result res_prepare;
1444 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1441 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1445 u16 queue;
1446 bool result = true; 1442 bool result = true;
1447 1443
1448 queue = skb_get_queue_mapping(skb);
1449
1450 if (unlikely(skb->len < 10)) { 1444 if (unlikely(skb->len < 10)) {
1451 dev_kfree_skb(skb); 1445 dev_kfree_skb(skb);
1452 return true; 1446 return true;
@@ -1482,12 +1476,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1482{ 1476{
1483 int tail_need = 0; 1477 int tail_need = 0;
1484 1478
1485 /* 1479 if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
1486 * This could be optimised, devices that do full hardware
1487 * crypto (including TKIP MMIC) need no tailroom... But we
1488 * have no drivers for such devices currently.
1489 */
1490 if (may_encrypt) {
1491 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1480 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1492 tail_need -= skb_tailroom(skb); 1481 tail_need -= skb_tailroom(skb);
1493 tail_need = max_t(int, tail_need, 0); 1482 tail_need = max_t(int, tail_need, 0);
@@ -2262,7 +2251,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2262 2251
2263 /* headroom, head length, tail length and maximum TIM length */ 2252 /* headroom, head length, tail length and maximum TIM length */
2264 skb = dev_alloc_skb(local->tx_headroom + 400 + 2253 skb = dev_alloc_skb(local->tx_headroom + 400 +
2265 sdata->u.mesh.vendor_ie_len); 2254 sdata->u.mesh.ie_len);
2266 if (!skb) 2255 if (!skb)
2267 goto out; 2256 goto out;
2268 2257
@@ -2485,7 +2474,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2485{ 2474{
2486 struct ieee80211_local *local = hw_to_local(hw); 2475 struct ieee80211_local *local = hw_to_local(hw);
2487 struct sk_buff *skb = NULL; 2476 struct sk_buff *skb = NULL;
2488 struct sta_info *sta;
2489 struct ieee80211_tx_data tx; 2477 struct ieee80211_tx_data tx;
2490 struct ieee80211_sub_if_data *sdata; 2478 struct ieee80211_sub_if_data *sdata;
2491 struct ieee80211_if_ap *bss = NULL; 2479 struct ieee80211_if_ap *bss = NULL;
@@ -2527,7 +2515,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2527 2515
2528 info = IEEE80211_SKB_CB(skb); 2516 info = IEEE80211_SKB_CB(skb);
2529 2517
2530 sta = tx.sta;
2531 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2518 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2532 tx.channel = local->hw.conf.channel; 2519 tx.channel = local->hw.conf.channel;
2533 info->band = tx.channel->band; 2520 info->band = tx.channel->band;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 556647a910ac..ef0560a2346a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1290,7 +1290,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1290 } 1290 }
1291 } 1291 }
1292 1292
1293 add_timer(&local->sta_cleanup); 1293 mod_timer(&local->sta_cleanup, jiffies + 1);
1294 1294
1295 mutex_lock(&local->sta_mtx); 1295 mutex_lock(&local->sta_mtx);
1296 list_for_each_entry(sta, &local->sta_list, list) 1296 list_for_each_entry(sta, &local->sta_list, list)
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 2ff6d1e3ed21..a1c6bfd55f0f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -30,17 +30,15 @@ int ieee80211_wep_init(struct ieee80211_local *local)
30 /* start WEP IV from a random value */ 30 /* start WEP IV from a random value */
31 get_random_bytes(&local->wep_iv, WEP_IV_LEN); 31 get_random_bytes(&local->wep_iv, WEP_IV_LEN);
32 32
33 local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 33 local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
34 CRYPTO_ALG_ASYNC);
35 if (IS_ERR(local->wep_tx_tfm)) { 34 if (IS_ERR(local->wep_tx_tfm)) {
36 local->wep_rx_tfm = ERR_PTR(-EINVAL); 35 local->wep_rx_tfm = ERR_PTR(-EINVAL);
37 return PTR_ERR(local->wep_tx_tfm); 36 return PTR_ERR(local->wep_tx_tfm);
38 } 37 }
39 38
40 local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 39 local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
41 CRYPTO_ALG_ASYNC);
42 if (IS_ERR(local->wep_rx_tfm)) { 40 if (IS_ERR(local->wep_rx_tfm)) {
43 crypto_free_blkcipher(local->wep_tx_tfm); 41 crypto_free_cipher(local->wep_tx_tfm);
44 local->wep_tx_tfm = ERR_PTR(-EINVAL); 42 local->wep_tx_tfm = ERR_PTR(-EINVAL);
45 return PTR_ERR(local->wep_rx_tfm); 43 return PTR_ERR(local->wep_rx_tfm);
46 } 44 }
@@ -51,9 +49,9 @@ int ieee80211_wep_init(struct ieee80211_local *local)
51void ieee80211_wep_free(struct ieee80211_local *local) 49void ieee80211_wep_free(struct ieee80211_local *local)
52{ 50{
53 if (!IS_ERR(local->wep_tx_tfm)) 51 if (!IS_ERR(local->wep_tx_tfm))
54 crypto_free_blkcipher(local->wep_tx_tfm); 52 crypto_free_cipher(local->wep_tx_tfm);
55 if (!IS_ERR(local->wep_rx_tfm)) 53 if (!IS_ERR(local->wep_rx_tfm))
56 crypto_free_blkcipher(local->wep_rx_tfm); 54 crypto_free_cipher(local->wep_rx_tfm);
57} 55}
58 56
59static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) 57static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
@@ -127,12 +125,11 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
127/* Perform WEP encryption using given key. data buffer must have tailroom 125/* Perform WEP encryption using given key. data buffer must have tailroom
128 * for 4-byte ICV. data_len must not include this ICV. Note: this function 126 * for 4-byte ICV. data_len must not include this ICV. Note: this function
129 * does _not_ add IV. data = RC4(data | CRC32(data)) */ 127 * does _not_ add IV. data = RC4(data | CRC32(data)) */
130int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 128int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
131 size_t klen, u8 *data, size_t data_len) 129 size_t klen, u8 *data, size_t data_len)
132{ 130{
133 struct blkcipher_desc desc = { .tfm = tfm };
134 struct scatterlist sg;
135 __le32 icv; 131 __le32 icv;
132 int i;
136 133
137 if (IS_ERR(tfm)) 134 if (IS_ERR(tfm))
138 return -1; 135 return -1;
@@ -140,9 +137,9 @@ int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
140 icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 137 icv = cpu_to_le32(~crc32_le(~0, data, data_len));
141 put_unaligned(icv, (__le32 *)(data + data_len)); 138 put_unaligned(icv, (__le32 *)(data + data_len));
142 139
143 crypto_blkcipher_setkey(tfm, rc4key, klen); 140 crypto_cipher_setkey(tfm, rc4key, klen);
144 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 141 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
145 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); 142 crypto_cipher_encrypt_one(tfm, data + i, data + i);
146 143
147 return 0; 144 return 0;
148} 145}
@@ -186,19 +183,18 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
186/* Perform WEP decryption using given key. data buffer includes encrypted 183/* Perform WEP decryption using given key. data buffer includes encrypted
187 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. 184 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
188 * Return 0 on success and -1 on ICV mismatch. */ 185 * Return 0 on success and -1 on ICV mismatch. */
189int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 186int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
190 size_t klen, u8 *data, size_t data_len) 187 size_t klen, u8 *data, size_t data_len)
191{ 188{
192 struct blkcipher_desc desc = { .tfm = tfm };
193 struct scatterlist sg;
194 __le32 crc; 189 __le32 crc;
190 int i;
195 191
196 if (IS_ERR(tfm)) 192 if (IS_ERR(tfm))
197 return -1; 193 return -1;
198 194
199 crypto_blkcipher_setkey(tfm, rc4key, klen); 195 crypto_cipher_setkey(tfm, rc4key, klen);
200 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 196 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
201 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); 197 crypto_cipher_decrypt_one(tfm, data + i, data + i);
202 198
203 crc = cpu_to_le32(~crc32_le(~0, data, data_len)); 199 crc = cpu_to_le32(~crc32_le(~0, data, data_len));
204 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) 200 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 58654ee33518..01e54840a628 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -18,12 +18,12 @@
18 18
19int ieee80211_wep_init(struct ieee80211_local *local); 19int ieee80211_wep_init(struct ieee80211_local *local);
20void ieee80211_wep_free(struct ieee80211_local *local); 20void ieee80211_wep_free(struct ieee80211_local *local);
21int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 21int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
22 size_t klen, u8 *data, size_t data_len); 22 size_t klen, u8 *data, size_t data_len);
23int ieee80211_wep_encrypt(struct ieee80211_local *local, 23int ieee80211_wep_encrypt(struct ieee80211_local *local,
24 struct sk_buff *skb, 24 struct sk_buff *skb,
25 const u8 *key, int keylen, int keyidx); 25 const u8 *key, int keylen, int keyidx);
26int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
27 size_t klen, u8 *data, size_t data_len); 27 size_t klen, u8 *data, size_t data_len);
28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
29 29
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index e73c8cae036b..a94b312dbfac 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -198,9 +198,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
198 struct sk_buff *skb; 198 struct sk_buff *skb;
199 struct ieee80211_mgmt *mgmt; 199 struct ieee80211_mgmt *mgmt;
200 u8 *pos, qos_info; 200 u8 *pos, qos_info;
201 const u8 *ies;
202 size_t offset = 0, noffset; 201 size_t offset = 0, noffset;
203 int i, len, count, rates_len, supp_rates_len; 202 int i, count, rates_len, supp_rates_len;
204 u16 capab; 203 u16 capab;
205 struct ieee80211_supported_band *sband; 204 struct ieee80211_supported_band *sband;
206 u32 rates = 0; 205 u32 rates = 0;
@@ -285,7 +284,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
285 } 284 }
286 285
287 /* SSID */ 286 /* SSID */
288 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len); 287 pos = skb_put(skb, 2 + wk->assoc.ssid_len);
289 *pos++ = WLAN_EID_SSID; 288 *pos++ = WLAN_EID_SSID;
290 *pos++ = wk->assoc.ssid_len; 289 *pos++ = wk->assoc.ssid_len;
291 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len); 290 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
@@ -295,7 +294,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
295 if (supp_rates_len > 8) 294 if (supp_rates_len > 8)
296 supp_rates_len = 8; 295 supp_rates_len = 8;
297 296
298 len = sband->n_bitrates;
299 pos = skb_put(skb, supp_rates_len + 2); 297 pos = skb_put(skb, supp_rates_len + 2);
300 *pos++ = WLAN_EID_SUPP_RATES; 298 *pos++ = WLAN_EID_SUPP_RATES;
301 *pos++ = supp_rates_len; 299 *pos++ = supp_rates_len;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f1765de2f4bf..9dc3b5f26e80 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -87,42 +87,76 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
89 89
90 /* No way to verify the MIC if the hardware stripped it */ 90 /*
91 if (status->flag & RX_FLAG_MMIC_STRIPPED) 91 * it makes no sense to check for MIC errors on anything other
92 * than data frames.
93 */
94 if (!ieee80211_is_data_present(hdr->frame_control))
95 return RX_CONTINUE;
96
97 /*
98 * No way to verify the MIC if the hardware stripped it or
99 * the IV with the key index. In this case we have solely rely
100 * on the driver to set RX_FLAG_MMIC_ERROR in the event of a
101 * MIC failure report.
102 */
103 if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
104 if (status->flag & RX_FLAG_MMIC_ERROR)
105 goto mic_fail;
106
107 if (!(status->flag & RX_FLAG_IV_STRIPPED))
108 goto update_iv;
109
92 return RX_CONTINUE; 110 return RX_CONTINUE;
111 }
93 112
113 /*
114 * Some hardware seems to generate Michael MIC failure reports; even
115 * though, the frame was not encrypted with TKIP and therefore has no
116 * MIC. Ignore the flag them to avoid triggering countermeasures.
117 */
94 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || 118 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
95 !ieee80211_has_protected(hdr->frame_control) || 119 !(status->flag & RX_FLAG_DECRYPTED))
96 !ieee80211_is_data_present(hdr->frame_control))
97 return RX_CONTINUE; 120 return RX_CONTINUE;
98 121
122 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) {
123 /*
124 * APs with pairwise keys should never receive Michael MIC
125 * errors for non-zero keyidx because these are reserved for
126 * group keys and only the AP is sending real multicast
127 * frames in the BSS. (
128 */
129 return RX_DROP_UNUSABLE;
130 }
131
132 if (status->flag & RX_FLAG_MMIC_ERROR)
133 goto mic_fail;
134
99 hdrlen = ieee80211_hdrlen(hdr->frame_control); 135 hdrlen = ieee80211_hdrlen(hdr->frame_control);
100 if (skb->len < hdrlen + MICHAEL_MIC_LEN) 136 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
101 return RX_DROP_UNUSABLE; 137 return RX_DROP_UNUSABLE;
102 138
103 data = skb->data + hdrlen; 139 data = skb->data + hdrlen;
104 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 140 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
105
106 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 141 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
107 michael_mic(key, hdr, data, data_len, mic); 142 michael_mic(key, hdr, data, data_len, mic);
108 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) { 143 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
109 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 144 goto mic_fail;
110 return RX_DROP_UNUSABLE;
111
112 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
113 (void *) skb->data, NULL,
114 GFP_ATOMIC);
115 return RX_DROP_UNUSABLE;
116 }
117 145
118 /* remove Michael MIC from payload */ 146 /* remove Michael MIC from payload */
119 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 147 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
120 148
149update_iv:
121 /* update IV in key information to be able to detect replays */ 150 /* update IV in key information to be able to detect replays */
122 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; 151 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
123 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; 152 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
124 153
125 return RX_CONTINUE; 154 return RX_CONTINUE;
155
156mic_fail:
157 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
158 (void *) skb->data, NULL, GFP_ATOMIC);
159 return RX_DROP_UNUSABLE;
126} 160}
127 161
128 162
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 8d5227212686..757143b2240a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/icmp.h> 12#include <linux/icmp.h>
13#include <linux/icmpv6.h> 13#include <linux/icmpv6.h>
14#include <linux/sctp.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 15#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <net/ip.h> 16#include <net/ip.h>
16#include <net/ipv6.h> 17#include <net/ipv6.h>
@@ -35,7 +36,20 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
35 *port = src ? th->source : th->dest; 36 *port = src ? th->source : th->dest;
36 break; 37 break;
37 } 38 }
38 case IPPROTO_UDP: { 39 case IPPROTO_SCTP: {
40 sctp_sctphdr_t _sh;
41 const sctp_sctphdr_t *sh;
42
43 sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
44 if (sh == NULL)
45 /* No choice either */
46 return false;
47
48 *port = src ? sh->source : sh->dest;
49 break;
50 }
51 case IPPROTO_UDP:
52 case IPPROTO_UDPLITE: {
39 struct udphdr _udph; 53 struct udphdr _udph;
40 const struct udphdr *uh; 54 const struct udphdr *uh;
41 55
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index b9214145d357..14281b6b8074 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -491,7 +491,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
492 .dimension = IPSET_DIM_TWO, 492 .dimension = IPSET_DIM_TWO,
493 .family = AF_UNSPEC, 493 .family = AF_UNSPEC,
494 .revision = 0, 494 .revision = 1,
495 .create = hash_ipport_create, 495 .create = hash_ipport_create,
496 .create_policy = { 496 .create_policy = {
497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 4642872df6e1..401c8a2531db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -509,7 +509,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
510 .dimension = IPSET_DIM_THREE, 510 .dimension = IPSET_DIM_THREE,
511 .family = AF_UNSPEC, 511 .family = AF_UNSPEC,
512 .revision = 0, 512 .revision = 1,
513 .create = hash_ipportip_create, 513 .create = hash_ipportip_create,
514 .create_policy = { 514 .create_policy = {
515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 2cb84a54b7ad..4743e5402522 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -574,7 +574,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
575 .dimension = IPSET_DIM_THREE, 575 .dimension = IPSET_DIM_THREE,
576 .family = AF_UNSPEC, 576 .family = AF_UNSPEC,
577 .revision = 0, 577 .revision = 1,
578 .create = hash_ipportnet_create, 578 .create = hash_ipportnet_create,
579 .create_policy = { 579 .create_policy = {
580 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 580 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8598676f2a05..d2a40362dd3a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -526,7 +526,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
527 .dimension = IPSET_DIM_TWO, 527 .dimension = IPSET_DIM_TWO,
528 .family = AF_UNSPEC, 528 .family = AF_UNSPEC,
529 .revision = 0, 529 .revision = 1,
530 .create = hash_netport_create, 530 .create = hash_netport_create,
531 .create_policy = { 531 .create_policy = {
532 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 532 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index ae47090bf45f..9930f340908a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1984,9 +1984,6 @@ static const struct file_operations ip_vs_info_fops = {
1984 .release = seq_release_private, 1984 .release = seq_release_private,
1985}; 1985};
1986 1986
1987#endif
1988
1989#ifdef CONFIG_PROC_FS
1990static int ip_vs_stats_show(struct seq_file *seq, void *v) 1987static int ip_vs_stats_show(struct seq_file *seq, void *v)
1991{ 1988{
1992 struct net *net = seq_file_single_net(seq); 1989 struct net *net = seq_file_single_net(seq);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ae142825881..05e9feb101c3 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -245,7 +245,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
245 ret = 0; 245 ret = 0;
246release: 246release:
247 nf_ct_put(ct); 247 nf_ct_put(ct);
248 return 0; 248 return ret;
249} 249}
250 250
251static const struct seq_operations ct_seq_ops = { 251static const struct seq_operations ct_seq_ops = {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 985e9b76c916..e0ee010935e7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,7 +381,6 @@ __build_packet_message(struct nfulnl_instance *inst,
381 struct nfulnl_msg_packet_hdr pmsg; 381 struct nfulnl_msg_packet_hdr pmsg;
382 struct nlmsghdr *nlh; 382 struct nlmsghdr *nlh;
383 struct nfgenmsg *nfmsg; 383 struct nfgenmsg *nfmsg;
384 __be32 tmp_uint;
385 sk_buff_data_t old_tail = inst->skb->tail; 384 sk_buff_data_t old_tail = inst->skb->tail;
386 385
387 nlh = NLMSG_PUT(inst->skb, 0, 0, 386 nlh = NLMSG_PUT(inst->skb, 0, 0,
@@ -428,7 +427,6 @@ __build_packet_message(struct nfulnl_instance *inst,
428 } 427 }
429 428
430 if (outdev) { 429 if (outdev) {
431 tmp_uint = htonl(outdev->ifindex);
432#ifndef CONFIG_BRIDGE_NETFILTER 430#ifndef CONFIG_BRIDGE_NETFILTER
433 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 431 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
434 htonl(outdev->ifindex)); 432 htonl(outdev->ifindex));
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a9adf4c6b299..52959efca858 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -762,8 +762,8 @@ void xt_compat_unlock(u_int8_t af)
762EXPORT_SYMBOL_GPL(xt_compat_unlock); 762EXPORT_SYMBOL_GPL(xt_compat_unlock);
763#endif 763#endif
764 764
765DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); 765DEFINE_PER_CPU(seqcount_t, xt_recseq);
766EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); 766EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
767 767
768static int xt_jumpstack_alloc(struct xt_table_info *i) 768static int xt_jumpstack_alloc(struct xt_table_info *i)
769{ 769{
@@ -1362,10 +1362,7 @@ static int __init xt_init(void)
1362 int rv; 1362 int rv;
1363 1363
1364 for_each_possible_cpu(i) { 1364 for_each_possible_cpu(i) {
1365 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1365 seqcount_init(&per_cpu(xt_recseq, i));
1366
1367 seqlock_init(&lock->lock);
1368 lock->readers = 0;
1369 } 1366 }
1370 1367
1371 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1368 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 5f14c8462e30..bae5756b1626 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -422,7 +422,6 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
422 422
423{ 423{
424 int ret_val = -EINVAL; 424 int ret_val = -EINVAL;
425 const char *type_str = "(unknown)";
426 struct netlbl_audit audit_info; 425 struct netlbl_audit audit_info;
427 426
428 if (!info->attrs[NLBL_CIPSOV4_A_DOI] || 427 if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
@@ -432,15 +431,12 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
432 netlbl_netlink_auditinfo(skb, &audit_info); 431 netlbl_netlink_auditinfo(skb, &audit_info);
433 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) { 432 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
434 case CIPSO_V4_MAP_TRANS: 433 case CIPSO_V4_MAP_TRANS:
435 type_str = "trans";
436 ret_val = netlbl_cipsov4_add_std(info, &audit_info); 434 ret_val = netlbl_cipsov4_add_std(info, &audit_info);
437 break; 435 break;
438 case CIPSO_V4_MAP_PASS: 436 case CIPSO_V4_MAP_PASS:
439 type_str = "pass";
440 ret_val = netlbl_cipsov4_add_pass(info, &audit_info); 437 ret_val = netlbl_cipsov4_add_pass(info, &audit_info);
441 break; 438 break;
442 case CIPSO_V4_MAP_LOCAL: 439 case CIPSO_V4_MAP_LOCAL:
443 type_str = "local";
444 ret_val = netlbl_cipsov4_add_local(info, &audit_info); 440 ret_val = netlbl_cipsov4_add_local(info, &audit_info);
445 break; 441 break;
446 } 442 }
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06cb02796a0e..732152f718e0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -591,7 +591,6 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { 593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
594 SOCK_DEBUG(sk, "NET/ROM: bind failed: invalid node callsign\n");
595 release_sock(sk); 594 release_sock(sk);
596 return -EADDRNOTAVAIL; 595 return -EADDRNOTAVAIL;
597 } 596 }
@@ -632,7 +631,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
632 sock_reset_flag(sk, SOCK_ZAPPED); 631 sock_reset_flag(sk, SOCK_ZAPPED);
633 dev_put(dev); 632 dev_put(dev);
634 release_sock(sk); 633 release_sock(sk);
635 SOCK_DEBUG(sk, "NET/ROM: socket is bound\n"); 634
636 return 0; 635 return 0;
637} 636}
638 637
@@ -1082,8 +1081,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1082 sax.sax25_call = nr->dest_addr; 1081 sax.sax25_call = nr->dest_addr;
1083 } 1082 }
1084 1083
1085 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
1086
1087 /* Build a packet - the conventional user limit is 236 bytes. We can 1084 /* Build a packet - the conventional user limit is 236 bytes. We can
1088 do ludicrously large NetROM frames but must not overflow */ 1085 do ludicrously large NetROM frames but must not overflow */
1089 if (len > 65536) { 1086 if (len > 65536) {
@@ -1091,7 +1088,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1091 goto out; 1088 goto out;
1092 } 1089 }
1093 1090
1094 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1095 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
1096 1092
1097 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) 1093 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
@@ -1105,7 +1101,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1105 */ 1101 */
1106 1102
1107 asmptr = skb_push(skb, NR_TRANSPORT_LEN); 1103 asmptr = skb_push(skb, NR_TRANSPORT_LEN);
1108 SOCK_DEBUG(sk, "Building NET/ROM Header.\n");
1109 1104
1110 /* Build a NET/ROM Transport header */ 1105 /* Build a NET/ROM Transport header */
1111 1106
@@ -1114,15 +1109,12 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1114 *asmptr++ = 0; /* To be filled in later */ 1109 *asmptr++ = 0; /* To be filled in later */
1115 *asmptr++ = 0; /* Ditto */ 1110 *asmptr++ = 0; /* Ditto */
1116 *asmptr++ = NR_INFO; 1111 *asmptr++ = NR_INFO;
1117 SOCK_DEBUG(sk, "Built header.\n");
1118 1112
1119 /* 1113 /*
1120 * Put the data on the end 1114 * Put the data on the end
1121 */ 1115 */
1122 skb_put(skb, len); 1116 skb_put(skb, len);
1123 1117
1124 SOCK_DEBUG(sk, "NET/ROM: Appending user data\n");
1125
1126 /* User data follows immediately after the NET/ROM transport header */ 1118 /* User data follows immediately after the NET/ROM transport header */
1127 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { 1119 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
1128 kfree_skb(skb); 1120 kfree_skb(skb);
@@ -1130,8 +1122,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1130 goto out; 1122 goto out;
1131 } 1123 }
1132 1124
1133 SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
1134
1135 if (sk->sk_state != TCP_ESTABLISHED) { 1125 if (sk->sk_state != TCP_ESTABLISHED) {
1136 kfree_skb(skb); 1126 kfree_skb(skb);
1137 err = -ENOTCONN; 1127 err = -ENOTCONN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5362e96022b..549527bca87a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,7 +538,7 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
538 rcu_read_lock(); 538 rcu_read_lock();
539 filter = rcu_dereference(sk->sk_filter); 539 filter = rcu_dereference(sk->sk_filter);
540 if (filter != NULL) 540 if (filter != NULL)
541 res = sk_run_filter(skb, filter->insns); 541 res = SK_RUN_FILTER(filter, skb);
542 rcu_read_unlock(); 542 rcu_read_unlock();
543 543
544 return res; 544 return res;
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 947038ddd04c..47b3452675b6 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -426,18 +426,14 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
426 return 0; 426 return 0;
427} 427}
428 428
429struct net_device *phonet_route_get(struct net *net, u8 daddr) 429struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
430{ 430{
431 struct phonet_net *pnn = phonet_pernet(net); 431 struct phonet_net *pnn = phonet_pernet(net);
432 struct phonet_routes *routes = &pnn->routes; 432 struct phonet_routes *routes = &pnn->routes;
433 struct net_device *dev; 433 struct net_device *dev;
434 434
435 ASSERT_RTNL(); /* no need to hold the device */
436
437 daddr >>= 2; 435 daddr >>= 2;
438 rcu_read_lock();
439 dev = rcu_dereference(routes->table[daddr]); 436 dev = rcu_dereference(routes->table[daddr]);
440 rcu_read_unlock();
441 return dev; 437 return dev;
442} 438}
443 439
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 58b3b1f991ed..438accb7a5a8 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -264,10 +264,11 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
264 struct net *net = sock_net(skb->sk); 264 struct net *net = sock_net(skb->sk);
265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; 265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0];
266 266
267 rcu_read_lock();
267 for (addr = 0; addr < 64; addr++) { 268 for (addr = 0; addr < 64; addr++) {
268 struct net_device *dev; 269 struct net_device *dev;
269 270
270 dev = phonet_route_get(net, addr << 2); 271 dev = phonet_route_get_rcu(net, addr << 2);
271 if (!dev) 272 if (!dev)
272 continue; 273 continue;
273 274
@@ -279,6 +280,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
279 } 280 }
280 281
281out: 282out:
283 rcu_read_unlock();
282 cb->args[0] = addr_idx; 284 cb->args[0] = addr_idx;
283 cb->args[1] = 0; 285 cb->args[1] = 0;
284 286
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b1adafab377c..8c5bfcef92cb 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)
52 52
53static struct { 53static struct {
54 struct hlist_head hlist[PN_HASHSIZE]; 54 struct hlist_head hlist[PN_HASHSIZE];
55 spinlock_t lock; 55 struct mutex lock;
56} pnsocks; 56} pnsocks;
57 57
58void __init pn_sock_init(void) 58void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)
61 61
62 for (i = 0; i < PN_HASHSIZE; i++) 62 for (i = 0; i < PN_HASHSIZE; i++)
63 INIT_HLIST_HEAD(pnsocks.hlist + i); 63 INIT_HLIST_HEAD(pnsocks.hlist + i);
64 spin_lock_init(&pnsocks.lock); 64 mutex_init(&pnsocks.lock);
65} 65}
66 66
67static struct hlist_head *pn_hash_list(u16 obj) 67static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
82 u8 res = spn->spn_resource; 82 u8 res = spn->spn_resource;
83 struct hlist_head *hlist = pn_hash_list(obj); 83 struct hlist_head *hlist = pn_hash_list(obj);
84 84
85 spin_lock_bh(&pnsocks.lock); 85 rcu_read_lock();
86 86 sk_for_each_rcu(sknode, node, hlist) {
87 sk_for_each(sknode, node, hlist) {
88 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
89 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
90 89
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
107 sock_hold(sknode); 106 sock_hold(sknode);
108 break; 107 break;
109 } 108 }
110 109 rcu_read_unlock();
111 spin_unlock_bh(&pnsocks.lock);
112 110
113 return rval; 111 return rval;
114} 112}
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
119 struct hlist_head *hlist = pnsocks.hlist; 117 struct hlist_head *hlist = pnsocks.hlist;
120 unsigned h; 118 unsigned h;
121 119
122 spin_lock(&pnsocks.lock); 120 rcu_read_lock();
123 for (h = 0; h < PN_HASHSIZE; h++) { 121 for (h = 0; h < PN_HASHSIZE; h++) {
124 struct hlist_node *node; 122 struct hlist_node *node;
125 struct sock *sknode; 123 struct sock *sknode;
@@ -140,25 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
140 } 138 }
141 hlist++; 139 hlist++;
142 } 140 }
143 spin_unlock(&pnsocks.lock); 141 rcu_read_unlock();
144} 142}
145 143
146void pn_sock_hash(struct sock *sk) 144void pn_sock_hash(struct sock *sk)
147{ 145{
148 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); 146 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
149 147
150 spin_lock_bh(&pnsocks.lock); 148 mutex_lock(&pnsocks.lock);
151 sk_add_node(sk, hlist); 149 sk_add_node_rcu(sk, hlist);
152 spin_unlock_bh(&pnsocks.lock); 150 mutex_unlock(&pnsocks.lock);
153} 151}
154EXPORT_SYMBOL(pn_sock_hash); 152EXPORT_SYMBOL(pn_sock_hash);
155 153
156void pn_sock_unhash(struct sock *sk) 154void pn_sock_unhash(struct sock *sk)
157{ 155{
158 spin_lock_bh(&pnsocks.lock); 156 mutex_lock(&pnsocks.lock);
159 sk_del_node_init(sk); 157 sk_del_node_init_rcu(sk);
160 spin_unlock_bh(&pnsocks.lock); 158 mutex_unlock(&pnsocks.lock);
161 pn_sock_unbind_all_res(sk); 159 pn_sock_unbind_all_res(sk);
160 synchronize_rcu();
162} 161}
163EXPORT_SYMBOL(pn_sock_unhash); 162EXPORT_SYMBOL(pn_sock_unhash);
164 163
@@ -548,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
548 unsigned h; 547 unsigned h;
549 548
550 for (h = 0; h < PN_HASHSIZE; h++) { 549 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each(sknode, node, hlist) { 550 sk_for_each_rcu(sknode, node, hlist) {
552 if (!net_eq(net, sock_net(sknode))) 551 if (!net_eq(net, sock_net(sknode)))
553 continue; 552 continue;
554 if (!pos) 553 if (!pos)
@@ -572,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
572} 571}
573 572
574static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) 573static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
575 __acquires(pnsocks.lock) 574 __acquires(rcu)
576{ 575{
577 spin_lock_bh(&pnsocks.lock); 576 rcu_read_lock();
578 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 577 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
579} 578}
580 579
@@ -591,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
591} 590}
592 591
593static void pn_sock_seq_stop(struct seq_file *seq, void *v) 592static void pn_sock_seq_stop(struct seq_file *seq, void *v)
594 __releases(pnsocks.lock) 593 __releases(rcu)
595{ 594{
596 spin_unlock_bh(&pnsocks.lock); 595 rcu_read_unlock();
597} 596}
598 597
599static int pn_sock_seq_show(struct seq_file *seq, void *v) 598static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -721,13 +720,11 @@ void pn_sock_unbind_all_res(struct sock *sk)
721 } 720 }
722 mutex_unlock(&resource_mutex); 721 mutex_unlock(&resource_mutex);
723 722
724 if (match == 0)
725 return;
726 synchronize_rcu();
727 while (match > 0) { 723 while (match > 0) {
728 sock_put(sk); 724 __sock_put(sk);
729 match--; 725 match--;
730 } 726 }
727 /* Caller is responsible for RCU sync before final sock_put() */
731} 728}
732 729
733#ifdef CONFIG_PROC_FS 730#ifdef CONFIG_PROC_FS
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 7fce6dfd2180..48464ca13b24 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -22,3 +22,14 @@ config RFKILL_INPUT
22 depends on RFKILL 22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT 23 depends on INPUT = y || RFKILL = INPUT
24 default y if !EXPERT 24 default y if !EXPERT
25
26config RFKILL_REGULATOR
27 tristate "Generic rfkill regulator driver"
28 depends on RFKILL || !RFKILL
29 depends on REGULATOR
30 help
31 This options enable controlling radio transmitters connected to
32 voltage regulator using the regulator framework.
33
34 To compile this driver as a module, choose M here: the module will
35 be called rfkill-regulator.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index 662105352691..d9a5a58ffd8c 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -5,3 +5,4 @@
5rfkill-y += core.o 5rfkill-y += core.o
6rfkill-$(CONFIG_RFKILL_INPUT) += input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o 7obj-$(CONFIG_RFKILL) += rfkill.o
8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
new file mode 100644
index 000000000000..18dc512a10f3
--- /dev/null
+++ b/net/rfkill/rfkill-regulator.c
@@ -0,0 +1,164 @@
1/*
2 * rfkill-regulator.c - Regulator consumer driver for rfkill
3 *
4 * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
5 * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
6 *
7 * Implementation inspired by leds-regulator driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/platform_device.h>
19#include <linux/regulator/consumer.h>
20#include <linux/rfkill.h>
21#include <linux/rfkill-regulator.h>
22
23struct rfkill_regulator_data {
24 struct rfkill *rf_kill;
25 bool reg_enabled;
26
27 struct regulator *vcc;
28};
29
30static int rfkill_regulator_set_block(void *data, bool blocked)
31{
32 struct rfkill_regulator_data *rfkill_data = data;
33
34 pr_debug("%s: blocked: %d\n", __func__, blocked);
35
36 if (blocked) {
37 if (rfkill_data->reg_enabled) {
38 regulator_disable(rfkill_data->vcc);
39 rfkill_data->reg_enabled = 0;
40 }
41 } else {
42 if (!rfkill_data->reg_enabled) {
43 regulator_enable(rfkill_data->vcc);
44 rfkill_data->reg_enabled = 1;
45 }
46 }
47
48 pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
49 regulator_is_enabled(rfkill_data->vcc));
50
51 return 0;
52}
53
54struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block,
56};
57
58static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
59{
60 struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data;
61 struct rfkill_regulator_data *rfkill_data;
62 struct regulator *vcc;
63 struct rfkill *rf_kill;
64 int ret = 0;
65
66 if (pdata == NULL) {
67 dev_err(&pdev->dev, "no platform data\n");
68 return -ENODEV;
69 }
70
71 if (pdata->name == NULL || pdata->type == 0) {
72 dev_err(&pdev->dev, "invalid name or type in platform data\n");
73 return -EINVAL;
74 }
75
76 vcc = regulator_get_exclusive(&pdev->dev, "vrfkill");
77 if (IS_ERR(vcc)) {
78 dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
79 ret = PTR_ERR(vcc);
80 goto out;
81 }
82
83 rfkill_data = kzalloc(sizeof(*rfkill_data), GFP_KERNEL);
84 if (rfkill_data == NULL) {
85 ret = -ENOMEM;
86 goto err_data_alloc;
87 }
88
89 rf_kill = rfkill_alloc(pdata->name, &pdev->dev,
90 pdata->type,
91 &rfkill_regulator_ops, rfkill_data);
92 if (rf_kill == NULL) {
93 dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
94 ret = -ENOMEM;
95 goto err_rfkill_alloc;
96 }
97
98 if (regulator_is_enabled(vcc)) {
99 dev_dbg(&pdev->dev, "Regulator already enabled\n");
100 rfkill_data->reg_enabled = 1;
101 }
102 rfkill_data->vcc = vcc;
103 rfkill_data->rf_kill = rf_kill;
104
105 ret = rfkill_register(rf_kill);
106 if (ret) {
107 dev_err(&pdev->dev, "Cannot register rfkill device\n");
108 goto err_rfkill_register;
109 }
110
111 platform_set_drvdata(pdev, rfkill_data);
112 dev_info(&pdev->dev, "%s initialized\n", pdata->name);
113
114 return 0;
115
116err_rfkill_register:
117 rfkill_destroy(rf_kill);
118err_rfkill_alloc:
119 kfree(rfkill_data);
120err_data_alloc:
121 regulator_put(vcc);
122out:
123 return ret;
124}
125
126static int __devexit rfkill_regulator_remove(struct platform_device *pdev)
127{
128 struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev);
129 struct rfkill *rf_kill = rfkill_data->rf_kill;
130
131 rfkill_unregister(rf_kill);
132 rfkill_destroy(rf_kill);
133 regulator_put(rfkill_data->vcc);
134 kfree(rfkill_data);
135
136 return 0;
137}
138
139static struct platform_driver rfkill_regulator_driver = {
140 .probe = rfkill_regulator_probe,
141 .remove = __devexit_p(rfkill_regulator_remove),
142 .driver = {
143 .name = "rfkill-regulator",
144 .owner = THIS_MODULE,
145 },
146};
147
148static int __init rfkill_regulator_init(void)
149{
150 return platform_driver_register(&rfkill_regulator_driver);
151}
152module_init(rfkill_regulator_init);
153
154static void __exit rfkill_regulator_exit(void)
155{
156 platform_driver_unregister(&rfkill_regulator_driver);
157}
158module_exit(rfkill_regulator_exit);
159
160MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
161MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
162MODULE_DESCRIPTION("Regulator consumer driver for rfkill");
163MODULE_LICENSE("GPL");
164MODULE_ALIAS("platform:rfkill-regulator");
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a80aef6e3d1f..f9ea925ad9cb 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -682,10 +682,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
686 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
687 return -EADDRNOTAVAIL; 686 return -EADDRNOTAVAIL;
688 }
689 687
690 source = &addr->srose_call; 688 source = &addr->srose_call;
691 689
@@ -716,7 +714,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
716 rose_insert_socket(sk); 714 rose_insert_socket(sk);
717 715
718 sock_reset_flag(sk, SOCK_ZAPPED); 716 sock_reset_flag(sk, SOCK_ZAPPED);
719 SOCK_DEBUG(sk, "ROSE: socket is bound\n"); 717
720 return 0; 718 return 0;
721} 719}
722 720
@@ -1109,10 +1107,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1109 srose.srose_digis[n] = rose->dest_digis[n]; 1107 srose.srose_digis[n] = rose->dest_digis[n];
1110 } 1108 }
1111 1109
1112 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1113
1114 /* Build a packet */ 1110 /* Build a packet */
1115 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1116 /* Sanity check the packet size */ 1111 /* Sanity check the packet size */
1117 if (len > 65535) 1112 if (len > 65535)
1118 return -EMSGSIZE; 1113 return -EMSGSIZE;
@@ -1127,7 +1122,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1127 /* 1122 /*
1128 * Put the data on the end 1123 * Put the data on the end
1129 */ 1124 */
1130 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1131 1125
1132 skb_reset_transport_header(skb); 1126 skb_reset_transport_header(skb);
1133 skb_put(skb, len); 1127 skb_put(skb, len);
@@ -1152,8 +1146,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 */ 1146 */
1153 asmptr = skb_push(skb, ROSE_MIN_LEN); 1147 asmptr = skb_push(skb, ROSE_MIN_LEN);
1154 1148
1155 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1156
1157 /* Build a ROSE Network header */ 1149 /* Build a ROSE Network header */
1158 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; 1150 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1159 asmptr[1] = (rose->lci >> 0) & 0xFF; 1151 asmptr[1] = (rose->lci >> 0) & 0xFF;
@@ -1162,10 +1154,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1162 if (qbit) 1154 if (qbit)
1163 asmptr[0] |= ROSE_Q_BIT; 1155 asmptr[0] |= ROSE_Q_BIT;
1164 1156
1165 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1166
1167 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1168
1169 if (sk->sk_state != TCP_ESTABLISHED) { 1157 if (sk->sk_state != TCP_ESTABLISHED) {
1170 kfree_skb(skb); 1158 kfree_skb(skb);
1171 return -ENOTCONN; 1159 return -ENOTCONN;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 55b93dc60d0c..b6ff06351d67 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -36,10 +36,11 @@ static void rxrpc_destroy_peer(struct work_struct *work);
36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) 36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
37{ 37{
38 struct rtable *rt; 38 struct rtable *rt;
39 struct flowi4 fl4;
39 40
40 peer->if_mtu = 1500; 41 peer->if_mtu = 1500;
41 42
42 rt = ip_route_output_ports(&init_net, NULL, 43 rt = ip_route_output_ports(&init_net, &fl4, NULL,
43 peer->srx.transport.sin.sin_addr.s_addr, 0, 44 peer->srx.transport.sin.sin_addr.s_addr, 0,
44 htons(7000), htons(7001), 45 htons(7000), htons(7001),
45 IPPROTO_UDP, 0, 0); 46 IPPROTO_UDP, 0, 0);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a7a5583d4f68..aeaa2110b699 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -239,6 +239,17 @@ config NET_SCH_CHOKE
239 To compile this code as a module, choose M here: the 239 To compile this code as a module, choose M here: the
240 module will be called sch_choke. 240 module will be called sch_choke.
241 241
242config NET_SCH_QFQ
243 tristate "Quick Fair Queueing scheduler (QFQ)"
244 help
245 Say Y here if you want to use the Quick Fair Queueing Scheduler (QFQ)
246 packet scheduling algorithm.
247
248 To compile this driver as a module, choose M here: the module
249 will be called sch_qfq.
250
251 If unsure, say N.
252
242config NET_SCH_INGRESS 253config NET_SCH_INGRESS
243 tristate "Ingress Qdisc" 254 tristate "Ingress Qdisc"
244 depends on NET_CLS_ACT 255 depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 2e77b8dba22e..dc5889c0a15a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
38 39
39obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 40obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
40obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o 41obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
new file mode 100644
index 000000000000..103343408593
--- /dev/null
+++ b/net/sched/sch_qfq.c
@@ -0,0 +1,1137 @@
1/*
2 * net/sched/sch_qfq.c Quick Fair Queueing Scheduler.
3 *
4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/bitops.h>
14#include <linux/errno.h>
15#include <linux/netdevice.h>
16#include <linux/pkt_sched.h>
17#include <net/sch_generic.h>
18#include <net/pkt_sched.h>
19#include <net/pkt_cls.h>
20
21
22/* Quick Fair Queueing
23 ===================
24
25 Sources:
26
27 Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
28 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
29
30 See also:
31 http://retis.sssup.it/~fabio/linux/qfq/
32 */
33
34/*
35
36 Virtual time computations.
37
38 S, F and V are all computed in fixed point arithmetic with
39 FRAC_BITS decimal bits.
40
41 QFQ_MAX_INDEX is the maximum index allowed for a group. We need
42 one bit per index.
43 QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
44
45 The layout of the bits is as below:
46
47 [ MTU_SHIFT ][ FRAC_BITS ]
48 [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
49 ^.__grp->index = 0
50 *.__grp->slot_shift
51
52 where MIN_SLOT_SHIFT is derived by difference from the others.
53
54 The max group index corresponds to Lmax/w_min, where
55 Lmax=1<<MTU_SHIFT, w_min = 1 .
56 From this, and knowing how many groups (MAX_INDEX) we want,
57 we can derive the shift corresponding to each group.
58
59 Because we often need to compute
60 F = S + len/w_i and V = V + len/wsum
61 instead of storing w_i store the value
62 inv_w = (1<<FRAC_BITS)/w_i
63 so we can do F = S + len * inv_w * wsum.
64 We use W_TOT in the formulas so we can easily move between
65 static and adaptive weight sum.
66
67 The per-scheduler-instance data contain all the data structures
68 for the scheduler: bitmaps and bucket lists.
69
70 */
71
72/*
73 * Maximum number of consecutive slots occupied by backlogged classes
74 * inside a group.
75 */
76#define QFQ_MAX_SLOTS 32
77
78/*
79 * Shifts used for class<->group mapping. We allow class weights that are
80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
81 * group with the smallest index that can support the L_i / r_i configured
82 * for the class.
83 *
84 * grp->index is the index of the group; and grp->slot_shift
85 * is the shift for the corresponding (scaled) sigma_i.
86 */
87#define QFQ_MAX_INDEX 19
88#define QFQ_MAX_WSHIFT 16
89
90#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
91#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
92
93#define FRAC_BITS 30 /* fixed point arithmetic */
94#define ONE_FP (1UL << FRAC_BITS)
95#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
96
97#define QFQ_MTU_SHIFT 11
98#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
99
100/*
101 * Possible group states. These values are used as indexes for the bitmaps
102 * array of struct qfq_queue.
103 */
104enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
105
106struct qfq_group;
107
108struct qfq_class {
109 struct Qdisc_class_common common;
110
111 unsigned int refcnt;
112 unsigned int filter_cnt;
113
114 struct gnet_stats_basic_packed bstats;
115 struct gnet_stats_queue qstats;
116 struct gnet_stats_rate_est rate_est;
117 struct Qdisc *qdisc;
118
119 struct hlist_node next; /* Link for the slot list. */
120 u64 S, F; /* flow timestamps (exact) */
121
122 /* group we belong to. In principle we would need the index,
123 * which is log_2(lmax/weight), but we never reference it
124 * directly, only the group.
125 */
126 struct qfq_group *grp;
127
128 /* these are copied from the flowset. */
129 u32 inv_w; /* ONE_FP/weight */
130 u32 lmax; /* Max packet size for this flow. */
131};
132
133struct qfq_group {
134 u64 S, F; /* group timestamps (approx). */
135 unsigned int slot_shift; /* Slot shift. */
136 unsigned int index; /* Group index. */
137 unsigned int front; /* Index of the front slot. */
138 unsigned long full_slots; /* non-empty slots */
139
140 /* Array of RR lists of active classes. */
141 struct hlist_head slots[QFQ_MAX_SLOTS];
142};
143
144struct qfq_sched {
145 struct tcf_proto *filter_list;
146 struct Qdisc_class_hash clhash;
147
148 u64 V; /* Precise virtual time. */
149 u32 wsum; /* weight sum */
150
151 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
152 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
153};
154
155static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
156{
157 struct qfq_sched *q = qdisc_priv(sch);
158 struct Qdisc_class_common *clc;
159
160 clc = qdisc_class_find(&q->clhash, classid);
161 if (clc == NULL)
162 return NULL;
163 return container_of(clc, struct qfq_class, common);
164}
165
166static void qfq_purge_queue(struct qfq_class *cl)
167{
168 unsigned int len = cl->qdisc->q.qlen;
169
170 qdisc_reset(cl->qdisc);
171 qdisc_tree_decrease_qlen(cl->qdisc, len);
172}
173
174static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
175 [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
176 [TCA_QFQ_LMAX] = { .type = NLA_U32 },
177};
178
179/*
180 * Calculate a flow index, given its weight and maximum packet length.
181 * index = log_2(maxlen/weight) but we need to apply the scaling.
182 * This is used only once at flow creation.
183 */
184static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
185{
186 u64 slot_size = (u64)maxlen * inv_w;
187 unsigned long size_map;
188 int index = 0;
189
190 size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
191 if (!size_map)
192 goto out;
193
194 index = __fls(size_map) + 1; /* basically a log_2 */
195 index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
196
197 if (index < 0)
198 index = 0;
199out:
200 pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
201 (unsigned long) ONE_FP/inv_w, maxlen, index);
202
203 return index;
204}
205
206static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
207 struct nlattr **tca, unsigned long *arg)
208{
209 struct qfq_sched *q = qdisc_priv(sch);
210 struct qfq_class *cl = (struct qfq_class *)*arg;
211 struct nlattr *tb[TCA_QFQ_MAX + 1];
212 u32 weight, lmax, inv_w;
213 int i, err;
214
215 if (tca[TCA_OPTIONS] == NULL) {
216 pr_notice("qfq: no options\n");
217 return -EINVAL;
218 }
219
220 err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
221 if (err < 0)
222 return err;
223
224 if (tb[TCA_QFQ_WEIGHT]) {
225 weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
226 if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
227 pr_notice("qfq: invalid weight %u\n", weight);
228 return -EINVAL;
229 }
230 } else
231 weight = 1;
232
233 inv_w = ONE_FP / weight;
234 weight = ONE_FP / inv_w;
235 if (q->wsum + weight > QFQ_MAX_WSUM) {
236 pr_notice("qfq: total weight out of range (%u + %u)\n",
237 weight, q->wsum);
238 return -EINVAL;
239 }
240
241 if (tb[TCA_QFQ_LMAX]) {
242 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
243 if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
244 pr_notice("qfq: invalid max length %u\n", lmax);
245 return -EINVAL;
246 }
247 } else
248 lmax = 1UL << QFQ_MTU_SHIFT;
249
250 if (cl != NULL) {
251 if (tca[TCA_RATE]) {
252 err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
253 qdisc_root_sleeping_lock(sch),
254 tca[TCA_RATE]);
255 if (err)
256 return err;
257 }
258
259 sch_tree_lock(sch);
260 if (tb[TCA_QFQ_WEIGHT]) {
261 q->wsum = weight - ONE_FP / cl->inv_w;
262 cl->inv_w = inv_w;
263 }
264 sch_tree_unlock(sch);
265
266 return 0;
267 }
268
269 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
270 if (cl == NULL)
271 return -ENOBUFS;
272
273 cl->refcnt = 1;
274 cl->common.classid = classid;
275 cl->lmax = lmax;
276 cl->inv_w = inv_w;
277 i = qfq_calc_index(cl->inv_w, cl->lmax);
278
279 cl->grp = &q->groups[i];
280 q->wsum += weight;
281
282 cl->qdisc = qdisc_create_dflt(sch->dev_queue,
283 &pfifo_qdisc_ops, classid);
284 if (cl->qdisc == NULL)
285 cl->qdisc = &noop_qdisc;
286
287 if (tca[TCA_RATE]) {
288 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
289 qdisc_root_sleeping_lock(sch),
290 tca[TCA_RATE]);
291 if (err) {
292 qdisc_destroy(cl->qdisc);
293 kfree(cl);
294 return err;
295 }
296 }
297
298 sch_tree_lock(sch);
299 qdisc_class_hash_insert(&q->clhash, &cl->common);
300 sch_tree_unlock(sch);
301
302 qdisc_class_hash_grow(sch, &q->clhash);
303
304 *arg = (unsigned long)cl;
305 return 0;
306}
307
308static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
309{
310 struct qfq_sched *q = qdisc_priv(sch);
311
312 if (cl->inv_w) {
313 q->wsum -= ONE_FP / cl->inv_w;
314 cl->inv_w = 0;
315 }
316
317 gen_kill_estimator(&cl->bstats, &cl->rate_est);
318 qdisc_destroy(cl->qdisc);
319 kfree(cl);
320}
321
322static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
323{
324 struct qfq_sched *q = qdisc_priv(sch);
325 struct qfq_class *cl = (struct qfq_class *)arg;
326
327 if (cl->filter_cnt > 0)
328 return -EBUSY;
329
330 sch_tree_lock(sch);
331
332 qfq_purge_queue(cl);
333 qdisc_class_hash_remove(&q->clhash, &cl->common);
334
335 BUG_ON(--cl->refcnt == 0);
336 /*
337 * This shouldn't happen: we "hold" one cops->get() when called
338 * from tc_ctl_tclass; the destroy method is done from cops->put().
339 */
340
341 sch_tree_unlock(sch);
342 return 0;
343}
344
345static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
346{
347 struct qfq_class *cl = qfq_find_class(sch, classid);
348
349 if (cl != NULL)
350 cl->refcnt++;
351
352 return (unsigned long)cl;
353}
354
355static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
356{
357 struct qfq_class *cl = (struct qfq_class *)arg;
358
359 if (--cl->refcnt == 0)
360 qfq_destroy_class(sch, cl);
361}
362
363static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
364{
365 struct qfq_sched *q = qdisc_priv(sch);
366
367 if (cl)
368 return NULL;
369
370 return &q->filter_list;
371}
372
373static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
374 u32 classid)
375{
376 struct qfq_class *cl = qfq_find_class(sch, classid);
377
378 if (cl != NULL)
379 cl->filter_cnt++;
380
381 return (unsigned long)cl;
382}
383
384static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
385{
386 struct qfq_class *cl = (struct qfq_class *)arg;
387
388 cl->filter_cnt--;
389}
390
391static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
392 struct Qdisc *new, struct Qdisc **old)
393{
394 struct qfq_class *cl = (struct qfq_class *)arg;
395
396 if (new == NULL) {
397 new = qdisc_create_dflt(sch->dev_queue,
398 &pfifo_qdisc_ops, cl->common.classid);
399 if (new == NULL)
400 new = &noop_qdisc;
401 }
402
403 sch_tree_lock(sch);
404 qfq_purge_queue(cl);
405 *old = cl->qdisc;
406 cl->qdisc = new;
407 sch_tree_unlock(sch);
408 return 0;
409}
410
411static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
412{
413 struct qfq_class *cl = (struct qfq_class *)arg;
414
415 return cl->qdisc;
416}
417
418static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
419 struct sk_buff *skb, struct tcmsg *tcm)
420{
421 struct qfq_class *cl = (struct qfq_class *)arg;
422 struct nlattr *nest;
423
424 tcm->tcm_parent = TC_H_ROOT;
425 tcm->tcm_handle = cl->common.classid;
426 tcm->tcm_info = cl->qdisc->handle;
427
428 nest = nla_nest_start(skb, TCA_OPTIONS);
429 if (nest == NULL)
430 goto nla_put_failure;
431 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
432 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
433 return nla_nest_end(skb, nest);
434
435nla_put_failure:
436 nla_nest_cancel(skb, nest);
437 return -EMSGSIZE;
438}
439
440static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
441 struct gnet_dump *d)
442{
443 struct qfq_class *cl = (struct qfq_class *)arg;
444 struct tc_qfq_stats xstats;
445
446 memset(&xstats, 0, sizeof(xstats));
447 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
448
449 xstats.weight = ONE_FP/cl->inv_w;
450 xstats.lmax = cl->lmax;
451
452 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
453 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
454 gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
455 return -1;
456
457 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
458}
459
460static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
461{
462 struct qfq_sched *q = qdisc_priv(sch);
463 struct qfq_class *cl;
464 struct hlist_node *n;
465 unsigned int i;
466
467 if (arg->stop)
468 return;
469
470 for (i = 0; i < q->clhash.hashsize; i++) {
471 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
472 if (arg->count < arg->skip) {
473 arg->count++;
474 continue;
475 }
476 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
477 arg->stop = 1;
478 return;
479 }
480 arg->count++;
481 }
482 }
483}
484
485static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
486 int *qerr)
487{
488 struct qfq_sched *q = qdisc_priv(sch);
489 struct qfq_class *cl;
490 struct tcf_result res;
491 int result;
492
493 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
494 pr_debug("qfq_classify: found %d\n", skb->priority);
495 cl = qfq_find_class(sch, skb->priority);
496 if (cl != NULL)
497 return cl;
498 }
499
500 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
501 result = tc_classify(skb, q->filter_list, &res);
502 if (result >= 0) {
503#ifdef CONFIG_NET_CLS_ACT
504 switch (result) {
505 case TC_ACT_QUEUED:
506 case TC_ACT_STOLEN:
507 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
508 case TC_ACT_SHOT:
509 return NULL;
510 }
511#endif
512 cl = (struct qfq_class *)res.class;
513 if (cl == NULL)
514 cl = qfq_find_class(sch, res.classid);
515 return cl;
516 }
517
518 return NULL;
519}
520
521/* Generic comparison function, handling wraparound. */
522static inline int qfq_gt(u64 a, u64 b)
523{
524 return (s64)(a - b) > 0;
525}
526
527/* Round a precise timestamp to its slotted value. */
528static inline u64 qfq_round_down(u64 ts, unsigned int shift)
529{
530 return ts & ~((1ULL << shift) - 1);
531}
532
533/* return the pointer to the group with lowest index in the bitmap */
534static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
535 unsigned long bitmap)
536{
537 int index = __ffs(bitmap);
538 return &q->groups[index];
539}
540/* Calculate a mask to mimic what would be ffs_from(). */
541static inline unsigned long mask_from(unsigned long bitmap, int from)
542{
543 return bitmap & ~((1UL << from) - 1);
544}
545
546/*
547 * The state computation relies on ER=0, IR=1, EB=2, IB=3
548 * First compute eligibility comparing grp->S, q->V,
549 * then check if someone is blocking us and possibly add EB
550 */
551static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
552{
553 /* if S > V we are not eligible */
554 unsigned int state = qfq_gt(grp->S, q->V);
555 unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
556 struct qfq_group *next;
557
558 if (mask) {
559 next = qfq_ffs(q, mask);
560 if (qfq_gt(grp->F, next->F))
561 state |= EB;
562 }
563
564 return state;
565}
566
567
568/*
569 * In principle
570 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
571 * q->bitmaps[src] &= ~mask;
572 * but we should make sure that src != dst
573 */
574static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
575 int src, int dst)
576{
577 q->bitmaps[dst] |= q->bitmaps[src] & mask;
578 q->bitmaps[src] &= ~mask;
579}
580
581static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
582{
583 unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
584 struct qfq_group *next;
585
586 if (mask) {
587 next = qfq_ffs(q, mask);
588 if (!qfq_gt(next->F, old_F))
589 return;
590 }
591
592 mask = (1UL << index) - 1;
593 qfq_move_groups(q, mask, EB, ER);
594 qfq_move_groups(q, mask, IB, IR);
595}
596
597/*
598 * perhaps
599 *
600 old_V ^= q->V;
601 old_V >>= QFQ_MIN_SLOT_SHIFT;
602 if (old_V) {
603 ...
604 }
605 *
606 */
607static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
608{
609 unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
610 unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
611
612 if (vslot != old_vslot) {
613 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
614 qfq_move_groups(q, mask, IR, ER);
615 qfq_move_groups(q, mask, IB, EB);
616 }
617}
618
619
620/*
621 * XXX we should make sure that slot becomes less than 32.
622 * This is guaranteed by the input values.
623 * roundedS is always cl->S rounded on grp->slot_shift bits.
624 */
625static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
626 u64 roundedS)
627{
628 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
629 unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
630
631 hlist_add_head(&cl->next, &grp->slots[i]);
632 __set_bit(slot, &grp->full_slots);
633}
634
635/* Maybe introduce hlist_first_entry?? */
636static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
637{
638 return hlist_entry(grp->slots[grp->front].first,
639 struct qfq_class, next);
640}
641
642/*
643 * remove the entry from the slot
644 */
645static void qfq_front_slot_remove(struct qfq_group *grp)
646{
647 struct qfq_class *cl = qfq_slot_head(grp);
648
649 BUG_ON(!cl);
650 hlist_del(&cl->next);
651 if (hlist_empty(&grp->slots[grp->front]))
652 __clear_bit(0, &grp->full_slots);
653}
654
655/*
656 * Returns the first full queue in a group. As a side effect,
657 * adjust the bucket list so the first non-empty bucket is at
658 * position 0 in full_slots.
659 */
660static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
661{
662 unsigned int i;
663
664 pr_debug("qfq slot_scan: grp %u full %#lx\n",
665 grp->index, grp->full_slots);
666
667 if (grp->full_slots == 0)
668 return NULL;
669
670 i = __ffs(grp->full_slots); /* zero based */
671 if (i > 0) {
672 grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
673 grp->full_slots >>= i;
674 }
675
676 return qfq_slot_head(grp);
677}
678
679/*
680 * adjust the bucket list. When the start time of a group decreases,
681 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
682 * move the objects. The mask of occupied slots must be shifted
683 * because we use ffs() to find the first non-empty slot.
684 * This covers decreases in the group's start time, but what about
685 * increases of the start time ?
686 * Here too we should make sure that i is less than 32
687 */
688static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
689{
690 unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
691
692 grp->full_slots <<= i;
693 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
694}
695
696static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
697{
698 struct qfq_group *grp;
699 unsigned long ineligible;
700
701 ineligible = q->bitmaps[IR] | q->bitmaps[IB];
702 if (ineligible) {
703 if (!q->bitmaps[ER]) {
704 grp = qfq_ffs(q, ineligible);
705 if (qfq_gt(grp->S, q->V))
706 q->V = grp->S;
707 }
708 qfq_make_eligible(q, old_V);
709 }
710}
711
712/* What is length of next packet in queue (0 if queue is empty) */
713static unsigned int qdisc_peek_len(struct Qdisc *sch)
714{
715 struct sk_buff *skb;
716
717 skb = sch->ops->peek(sch);
718 return skb ? qdisc_pkt_len(skb) : 0;
719}
720
721/*
722 * Updates the class, returns true if also the group needs to be updated.
723 */
724static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
725{
726 unsigned int len = qdisc_peek_len(cl->qdisc);
727
728 cl->S = cl->F;
729 if (!len)
730 qfq_front_slot_remove(grp); /* queue is empty */
731 else {
732 u64 roundedS;
733
734 cl->F = cl->S + (u64)len * cl->inv_w;
735 roundedS = qfq_round_down(cl->S, grp->slot_shift);
736 if (roundedS == grp->S)
737 return false;
738
739 qfq_front_slot_remove(grp);
740 qfq_slot_insert(grp, cl, roundedS);
741 }
742
743 return true;
744}
745
/*
 * Dequeue one packet: serve the head class of the highest-priority
 * eligible-and-ready group, advance the system virtual time V, then
 * update the served class/group and the ER/EB/IR/IB bitmaps.
 */
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct sk_buff *skb;
	unsigned int len;
	u64 old_V;

	/* No group is both eligible and ready: nothing to send. */
	if (!q->bitmaps[ER])
		return NULL;

	/* Lowest-index group in ER has the smallest finish time. */
	grp = qfq_ffs(q, q->bitmaps[ER]);

	cl = qfq_slot_head(grp);
	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (!skb) {
		/* The leaf promised a packet via peek() but gave none. */
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
		return NULL;
	}

	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	/* Advance V by len/total_weight (IWSUM is the inverse weight sum). */
	old_V = q->V;
	len = qdisc_pkt_len(skb);
	q->V += (u64)len * IWSUM;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) cl->F, (unsigned long long) q->V);

	if (qfq_update_class(grp, cl)) {
		u64 old_F = grp->F;

		cl = qfq_slot_scan(grp);
		if (!cl)
			/* Group emptied: it leaves ER entirely. */
			__clear_bit(grp->index, &q->bitmaps[ER]);
		else {
			u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
			unsigned int s;

			/* Same bucket: group timestamps unchanged, and the
			 * unblock pass below is not needed either.
			 */
			if (grp->S == roundedS)
				goto skip_unblock;
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			__clear_bit(grp->index, &q->bitmaps[ER]);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}

		/* Groups blocked by our old finish time may now be ready. */
		qfq_unblock_groups(q, grp->index, old_F);
	}

skip_unblock:
	qfq_update_eligible(q, old_V);

	return skb;
}
803
804/*
805 * Assign a reasonable start time for a new flow k in group i.
806 * Admissible values for \hat(F) are multiples of \sigma_i
807 * no greater than V+\sigma_i . Larger values mean that
808 * we had a wraparound so we consider the timestamp to be stale.
809 *
810 * If F is not stale and F >= V then we set S = F.
811 * Otherwise we should assign S = V, but this may violate
812 * the ordering in ER. So, if we have groups in ER, set S to
813 * the F_j of the first group j which would be blocking us.
814 * We are guaranteed not to move S backward because
815 * otherwise our group i would still be blocked.
816 */
817static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
818{
819 unsigned long mask;
820 uint32_t limit, roundedF;
821 int slot_shift = cl->grp->slot_shift;
822
823 roundedF = qfq_round_down(cl->F, slot_shift);
824 limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
825
826 if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
827 /* timestamp was stale */
828 mask = mask_from(q->bitmaps[ER], cl->grp->index);
829 if (mask) {
830 struct qfq_group *next = qfq_ffs(q, mask);
831 if (qfq_gt(roundedF, next->F)) {
832 cl->S = next->F;
833 return;
834 }
835 }
836 cl->S = q->V;
837 } else /* timestamp is not stale */
838 cl->S = cl->F;
839}
840
/*
 * Enqueue @skb: classify it to a leaf class, hand it to the leaf
 * qdisc and, when the leaf just became backlogged, compute the class
 * timestamps and (re)insert the class into its group's bucket list.
 */
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	struct qfq_class *cl;
	int err;
	u64 roundedS;
	int s;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		/* Only count a drop for real drops, not for stolen skbs. */
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	++sch->q.qlen;

	/* If the new skb is not the head of queue, then done here. */
	if (cl->qdisc->q.qlen != 1)
		return err;

	/* If reach this point, queue q was idle */
	grp = cl->grp;
	qfq_update_start(q, cl);	/* assigns cl->S */

	/* compute new finish time and rounded start. */
	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);

	/*
	 * insert cl in the correct bucket.
	 * If cl->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, cl->S))
			goto skip_update;

		/* create a slot for this cl->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) cl->S,
		 (unsigned long long) cl->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, cl, roundedS);

	return err;
}
921
922
923static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
924 struct qfq_class *cl)
925{
926 unsigned int i, offset;
927 u64 roundedS;
928
929 roundedS = qfq_round_down(cl->S, grp->slot_shift);
930 offset = (roundedS - grp->S) >> grp->slot_shift;
931 i = (grp->front + offset) % QFQ_MAX_SLOTS;
932
933 hlist_del(&cl->next);
934 if (hlist_empty(&grp->slots[i]))
935 __clear_bit(offset, &grp->full_slots);
936}
937
/*
 * called to forcibly destroy a queue.
 * If the queue is not in the front bucket, or if it has
 * other queues in the front bucket, we can simply remove
 * the queue with no other side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_group *grp = cl->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	cl->F = cl->S;
	qfq_slot_remove(q, grp, cl);

	if (!grp->full_slots) {
		/* Group became empty: take it out of every state set. */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		/* If this group was in ER with no higher-priority group
		 * ahead of it, it may have been the one blocking others:
		 * unblock every group it could have been holding back.
		 */
		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				/* keep only groups above the next ER group */
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		/* Front bucket emptied but the group is still backlogged:
		 * advance to the next busy bucket and recompute the state.
		 */
		cl = qfq_slot_scan(grp);
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}

	qfq_update_eligible(q, q->V);
}
988
989static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
990{
991 struct qfq_sched *q = qdisc_priv(sch);
992 struct qfq_class *cl = (struct qfq_class *)arg;
993
994 if (cl->qdisc->q.qlen == 0)
995 qfq_deactivate_class(q, cl);
996}
997
998static unsigned int qfq_drop(struct Qdisc *sch)
999{
1000 struct qfq_sched *q = qdisc_priv(sch);
1001 struct qfq_group *grp;
1002 unsigned int i, j, len;
1003
1004 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1005 grp = &q->groups[i];
1006 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1007 struct qfq_class *cl;
1008 struct hlist_node *n;
1009
1010 hlist_for_each_entry(cl, n, &grp->slots[j], next) {
1011
1012 if (!cl->qdisc->ops->drop)
1013 continue;
1014
1015 len = cl->qdisc->ops->drop(cl->qdisc);
1016 if (len > 0) {
1017 sch->q.qlen--;
1018 if (!cl->qdisc->q.qlen)
1019 qfq_deactivate_class(q, cl);
1020
1021 return len;
1022 }
1023 }
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1031{
1032 struct qfq_sched *q = qdisc_priv(sch);
1033 struct qfq_group *grp;
1034 int i, j, err;
1035
1036 err = qdisc_class_hash_init(&q->clhash);
1037 if (err < 0)
1038 return err;
1039
1040 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1041 grp = &q->groups[i];
1042 grp->index = i;
1043 grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
1044 - (QFQ_MAX_INDEX - i);
1045 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1046 INIT_HLIST_HEAD(&grp->slots[j]);
1047 }
1048
1049 return 0;
1050}
1051
1052static void qfq_reset_qdisc(struct Qdisc *sch)
1053{
1054 struct qfq_sched *q = qdisc_priv(sch);
1055 struct qfq_group *grp;
1056 struct qfq_class *cl;
1057 struct hlist_node *n, *tmp;
1058 unsigned int i, j;
1059
1060 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1061 grp = &q->groups[i];
1062 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1063 hlist_for_each_entry_safe(cl, n, tmp,
1064 &grp->slots[j], next) {
1065 qfq_deactivate_class(q, cl);
1066 }
1067 }
1068 }
1069
1070 for (i = 0; i < q->clhash.hashsize; i++) {
1071 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1072 qdisc_reset(cl->qdisc);
1073 }
1074 sch->q.qlen = 0;
1075}
1076
1077static void qfq_destroy_qdisc(struct Qdisc *sch)
1078{
1079 struct qfq_sched *q = qdisc_priv(sch);
1080 struct qfq_class *cl;
1081 struct hlist_node *n, *next;
1082 unsigned int i;
1083
1084 tcf_destroy_chain(&q->filter_list);
1085
1086 for (i = 0; i < q->clhash.hashsize; i++) {
1087 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1088 common.hnode) {
1089 qfq_destroy_class(sch, cl);
1090 }
1091 }
1092 qdisc_class_hash_destroy(&q->clhash);
1093}
1094
/* Class-level operations exported to the qdisc core for "qfq". */
static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.get		= qfq_get_class,
	.put		= qfq_put_class,
	.tcf_chain	= qfq_tcf_chain,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};
1110
/* Top-level qdisc operations; registered under the "qfq" identifier. */
static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= qfq_drop,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};
1124
/* Module load: register the "qfq" qdisc with the packet scheduler core. */
static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

/* Module unload: unregister the qdisc. */
static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c2e628dfaacc..7ef87f9eb675 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -169,7 +169,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
169 } 169 }
170 case htons(ETH_P_IPV6): 170 case htons(ETH_P_IPV6):
171 { 171 {
172 struct ipv6hdr *iph; 172 const struct ipv6hdr *iph;
173 int poff; 173 int poff;
174 174
175 if (!pskb_network_may_pull(skb, sizeof(*iph))) 175 if (!pskb_network_may_pull(skb, sizeof(*iph)))
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index bf24fa697de2..ec997cfe0a7e 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -98,7 +98,6 @@ const char *sctp_cname(const sctp_subtype_t cid)
98 98
99/* These are printable forms of the states. */ 99/* These are printable forms of the states. */
100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { 100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
101 "STATE_EMPTY",
102 "STATE_CLOSED", 101 "STATE_CLOSED",
103 "STATE_COOKIE_WAIT", 102 "STATE_COOKIE_WAIT",
104 "STATE_COOKIE_ECHOED", 103 "STATE_COOKIE_ECHOED",
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e10acc01c75f..c8cc24e282c3 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -325,6 +325,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
325 struct sctp_transport **transport) 325 struct sctp_transport **transport)
326{ 326{
327 struct sctp_association *asoc = NULL; 327 struct sctp_association *asoc = NULL;
328 struct sctp_association *tmp;
328 struct sctp_transport *t = NULL; 329 struct sctp_transport *t = NULL;
329 struct sctp_hashbucket *head; 330 struct sctp_hashbucket *head;
330 struct sctp_ep_common *epb; 331 struct sctp_ep_common *epb;
@@ -333,25 +334,32 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
333 int rport; 334 int rport;
334 335
335 *transport = NULL; 336 *transport = NULL;
337
338 /* If the local port is not set, there can't be any associations
339 * on this endpoint.
340 */
341 if (!ep->base.bind_addr.port)
342 goto out;
343
336 rport = ntohs(paddr->v4.sin_port); 344 rport = ntohs(paddr->v4.sin_port);
337 345
338 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport); 346 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
339 head = &sctp_assoc_hashtable[hash]; 347 head = &sctp_assoc_hashtable[hash];
340 read_lock(&head->lock); 348 read_lock(&head->lock);
341 sctp_for_each_hentry(epb, node, &head->chain) { 349 sctp_for_each_hentry(epb, node, &head->chain) {
342 asoc = sctp_assoc(epb); 350 tmp = sctp_assoc(epb);
343 if (asoc->ep != ep || rport != asoc->peer.port) 351 if (tmp->ep != ep || rport != tmp->peer.port)
344 goto next; 352 continue;
345 353
346 t = sctp_assoc_lookup_paddr(asoc, paddr); 354 t = sctp_assoc_lookup_paddr(tmp, paddr);
347 if (t) { 355 if (t) {
356 asoc = tmp;
348 *transport = t; 357 *transport = t;
349 break; 358 break;
350 } 359 }
351next:
352 asoc = NULL;
353 } 360 }
354 read_unlock(&head->lock); 361 read_unlock(&head->lock);
362out:
355 return asoc; 363 return asoc;
356} 364}
357 365
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5436c6921167..741ed1648838 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -565,7 +565,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
565 */ 565 */
566void sctp_v4_err(struct sk_buff *skb, __u32 info) 566void sctp_v4_err(struct sk_buff *skb, __u32 info)
567{ 567{
568 struct iphdr *iph = (struct iphdr *)skb->data; 568 const struct iphdr *iph = (const struct iphdr *)skb->data;
569 const int ihlen = iph->ihl * 4; 569 const int ihlen = iph->ihl * 4;
570 const int type = icmp_hdr(skb)->type; 570 const int type = icmp_hdr(skb)->type;
571 const int code = icmp_hdr(skb)->code; 571 const int code = icmp_hdr(skb)->code;
@@ -661,7 +661,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
661{ 661{
662 sctp_chunkhdr_t *ch; 662 sctp_chunkhdr_t *ch;
663 __u8 *ch_end; 663 __u8 *ch_end;
664 sctp_errhdr_t *err;
665 664
666 ch = (sctp_chunkhdr_t *) skb->data; 665 ch = (sctp_chunkhdr_t *) skb->data;
667 666
@@ -697,20 +696,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
697 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) 696 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
698 goto discard; 697 goto discard;
699 698
700 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
701 * or a COOKIE ACK the SCTP Packet should be silently
702 * discarded.
703 */
704 if (SCTP_CID_COOKIE_ACK == ch->type)
705 goto discard;
706
707 if (SCTP_CID_ERROR == ch->type) {
708 sctp_walk_errors(err, ch) {
709 if (SCTP_ERROR_STALE_COOKIE == err->cause)
710 goto discard;
711 }
712 }
713
714 ch = (sctp_chunkhdr_t *) ch_end; 699 ch = (sctp_chunkhdr_t *) ch_end;
715 } while (ch_end < skb_tail_pointer(skb)); 700 } while (ch_end < skb_tail_pointer(skb));
716 701
@@ -1017,7 +1002,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1017 /* Skip over the ADDIP header and find the Address parameter */ 1002 /* Skip over the ADDIP header and find the Address parameter */
1018 param = (union sctp_addr_param *)(asconf + 1); 1003 param = (union sctp_addr_param *)(asconf + 1);
1019 1004
1020 af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); 1005 af = sctp_get_af_specific(param_type2af(param->p.type));
1021 if (unlikely(!af)) 1006 if (unlikely(!af))
1022 return NULL; 1007 return NULL;
1023 1008
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 865ce7ba4e14..500875f4dc41 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -80,6 +80,13 @@
80 80
81#include <asm/uaccess.h> 81#include <asm/uaccess.h>
82 82
83static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
84 union sctp_addr *s2);
85static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
86 __be16 port);
87static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
88 const union sctp_addr *addr2);
89
83/* Event handler for inet6 address addition/deletion events. 90/* Event handler for inet6 address addition/deletion events.
84 * The sctp_local_addr_list needs to be protocted by a spin lock since 91 * The sctp_local_addr_list needs to be protocted by a spin lock since
85 * multiple notifiers (say IPv4 and IPv6) may be running at the same 92 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -240,37 +247,107 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
240/* Returns the dst cache entry for the given source and destination ip 247/* Returns the dst cache entry for the given source and destination ip
241 * addresses. 248 * addresses.
242 */ 249 */
243static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, 250static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
244 union sctp_addr *daddr, 251 struct flowi *fl, struct sock *sk)
245 union sctp_addr *saddr)
246{ 252{
247 struct dst_entry *dst; 253 struct sctp_association *asoc = t->asoc;
248 struct flowi6 fl6; 254 struct dst_entry *dst = NULL;
255 struct flowi6 *fl6 = &fl->u.ip6;
256 struct sctp_bind_addr *bp;
257 struct sctp_sockaddr_entry *laddr;
258 union sctp_addr *baddr = NULL;
259 union sctp_addr *daddr = &t->ipaddr;
260 union sctp_addr dst_saddr;
261 __u8 matchlen = 0;
262 __u8 bmatchlen;
263 sctp_scope_t scope;
249 264
250 memset(&fl6, 0, sizeof(fl6)); 265 memset(fl6, 0, sizeof(struct flowi6));
251 ipv6_addr_copy(&fl6.daddr, &daddr->v6.sin6_addr); 266 ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
267 fl6->fl6_dport = daddr->v6.sin6_port;
268 fl6->flowi6_proto = IPPROTO_SCTP;
252 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) 269 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
253 fl6.flowi6_oif = daddr->v6.sin6_scope_id; 270 fl6->flowi6_oif = daddr->v6.sin6_scope_id;
254 271
272 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6->daddr);
255 273
256 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6.daddr); 274 if (asoc)
275 fl6->fl6_sport = htons(asoc->base.bind_addr.port);
257 276
258 if (saddr) { 277 if (saddr) {
259 ipv6_addr_copy(&fl6.saddr, &saddr->v6.sin6_addr); 278 ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
260 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6.saddr); 279 fl6->fl6_sport = saddr->v6.sin6_port;
280 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
281 }
282
283 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
284 if (!asoc || saddr)
285 goto out;
286
287 bp = &asoc->base.bind_addr;
288 scope = sctp_scope(daddr);
289 /* ip6_dst_lookup has filled in the fl6->saddr for us. Check
290 * to see if we can use it.
291 */
292 if (!IS_ERR(dst)) {
293 /* Walk through the bind address list and look for a bind
294 * address that matches the source address of the returned dst.
295 */
296 sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
297 rcu_read_lock();
298 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
299 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
300 continue;
301
302 /* Do not compare against v4 addrs */
303 if ((laddr->a.sa.sa_family == AF_INET6) &&
304 (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
305 rcu_read_unlock();
306 goto out;
307 }
308 }
309 rcu_read_unlock();
310 /* None of the bound addresses match the source address of the
311 * dst. So release it.
312 */
313 dst_release(dst);
314 dst = NULL;
261 } 315 }
262 316
263 dst = ip6_route_output(&init_net, NULL, &fl6); 317 /* Walk through the bind address list and try to get the
264 if (!dst->error) { 318 * best source address for a given destination.
319 */
320 rcu_read_lock();
321 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
322 if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
323 continue;
324 if ((laddr->a.sa.sa_family == AF_INET6) &&
325 (scope <= sctp_scope(&laddr->a))) {
326 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
327 if (!baddr || (matchlen < bmatchlen)) {
328 baddr = &laddr->a;
329 matchlen = bmatchlen;
330 }
331 }
332 }
333 rcu_read_unlock();
334 if (baddr) {
335 ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
336 fl6->fl6_sport = baddr->v6.sin6_port;
337 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
338 }
339
340out:
341 if (!IS_ERR(dst)) {
265 struct rt6_info *rt; 342 struct rt6_info *rt;
266 rt = (struct rt6_info *)dst; 343 rt = (struct rt6_info *)dst;
344 t->dst = dst;
267 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n", 345 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n",
268 &rt->rt6i_dst.addr, &rt->rt6i_src.addr); 346 &rt->rt6i_dst.addr, &fl6->saddr);
269 return dst; 347 } else {
348 t->dst = NULL;
349 SCTP_DEBUG_PRINTK("NO ROUTE\n");
270 } 350 }
271 SCTP_DEBUG_PRINTK("NO ROUTE\n");
272 dst_release(dst);
273 return NULL;
274} 351}
275 352
276/* Returns the number of consecutive initial bits that match in the 2 ipv6 353/* Returns the number of consecutive initial bits that match in the 2 ipv6
@@ -286,64 +363,18 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
286 * and asoc's bind address list. 363 * and asoc's bind address list.
287 */ 364 */
288static void sctp_v6_get_saddr(struct sctp_sock *sk, 365static void sctp_v6_get_saddr(struct sctp_sock *sk,
289 struct sctp_association *asoc, 366 struct sctp_transport *t,
290 struct dst_entry *dst, 367 struct flowi *fl)
291 union sctp_addr *daddr,
292 union sctp_addr *saddr)
293{ 368{
294 struct sctp_bind_addr *bp; 369 struct flowi6 *fl6 = &fl->u.ip6;
295 struct sctp_sockaddr_entry *laddr; 370 union sctp_addr *saddr = &t->saddr;
296 sctp_scope_t scope;
297 union sctp_addr *baddr = NULL;
298 __u8 matchlen = 0;
299 __u8 bmatchlen;
300 371
301 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p daddr:%pI6 ", 372 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
302 __func__, asoc, dst, &daddr->v6.sin6_addr);
303
304 if (!asoc) {
305 ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
306 dst ? ip6_dst_idev(dst)->dev : NULL,
307 &daddr->v6.sin6_addr,
308 inet6_sk(&sk->inet.sk)->srcprefs,
309 &saddr->v6.sin6_addr);
310 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
311 &saddr->v6.sin6_addr);
312 return;
313 }
314
315 scope = sctp_scope(daddr);
316
317 bp = &asoc->base.bind_addr;
318 373
319 /* Go through the bind address list and find the best source address 374 if (t->dst) {
320 * that matches the scope of the destination address. 375 saddr->v6.sin6_family = AF_INET6;
321 */ 376 ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
322 rcu_read_lock();
323 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
324 if (!laddr->valid)
325 continue;
326 if ((laddr->state == SCTP_ADDR_SRC) &&
327 (laddr->a.sa.sa_family == AF_INET6) &&
328 (scope <= sctp_scope(&laddr->a))) {
329 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
330 if (!baddr || (matchlen < bmatchlen)) {
331 baddr = &laddr->a;
332 matchlen = bmatchlen;
333 }
334 }
335 }
336
337 if (baddr) {
338 memcpy(saddr, baddr, sizeof(union sctp_addr));
339 SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
340 } else {
341 pr_err("%s: asoc:%p Could not find a valid source "
342 "address for the dest:%pI6\n",
343 __func__, asoc, &daddr->v6.sin6_addr);
344 } 377 }
345
346 rcu_read_unlock();
347} 378}
348 379
349/* Make a copy of all potential local addresses. */ 380/* Make a copy of all potential local addresses. */
@@ -465,14 +496,13 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
465 return length; 496 return length;
466} 497}
467 498
468/* Initialize a sctp_addr from a dst_entry. */ 499/* Initialize a sctp_addr from struct in6_addr. */
469static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, 500static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
470 __be16 port) 501 __be16 port)
471{ 502{
472 struct rt6_info *rt = (struct rt6_info *)dst;
473 addr->sa.sa_family = AF_INET6; 503 addr->sa.sa_family = AF_INET6;
474 addr->v6.sin6_port = port; 504 addr->v6.sin6_port = port;
475 ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr); 505 ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
476} 506}
477 507
478/* Compare addresses exactly. 508/* Compare addresses exactly.
@@ -531,7 +561,7 @@ static int sctp_v6_is_any(const union sctp_addr *addr)
531static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) 561static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
532{ 562{
533 int type; 563 int type;
534 struct in6_addr *in6 = (struct in6_addr *)&addr->v6.sin6_addr; 564 const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr;
535 565
536 type = ipv6_addr_type(in6); 566 type = ipv6_addr_type(in6);
537 if (IPV6_ADDR_ANY == type) 567 if (IPV6_ADDR_ANY == type)
@@ -959,7 +989,6 @@ static struct sctp_af sctp_af_inet6 = {
959 .to_sk_daddr = sctp_v6_to_sk_daddr, 989 .to_sk_daddr = sctp_v6_to_sk_daddr,
960 .from_addr_param = sctp_v6_from_addr_param, 990 .from_addr_param = sctp_v6_from_addr_param,
961 .to_addr_param = sctp_v6_to_addr_param, 991 .to_addr_param = sctp_v6_to_addr_param,
962 .dst_saddr = sctp_v6_dst_saddr,
963 .cmp_addr = sctp_v6_cmp_addr, 992 .cmp_addr = sctp_v6_cmp_addr,
964 .scope = sctp_v6_scope, 993 .scope = sctp_v6_scope,
965 .addr_valid = sctp_v6_addr_valid, 994 .addr_valid = sctp_v6_addr_valid,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index bf92a5b68f8b..1c88c8911dc5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -131,7 +131,8 @@ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
132 int count_of_newacks) 132 int count_of_newacks)
133{ 133{
134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) 134 if (count_of_newacks < 2 &&
135 (transport && !transport->cacc.cacc_saw_newack))
135 return 1; 136 return 1;
136 return 0; 137 return 0;
137} 138}
@@ -319,7 +320,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
319 * chunk. 320 * chunk.
320 */ 321 */
321 switch (q->asoc->state) { 322 switch (q->asoc->state) {
322 case SCTP_STATE_EMPTY:
323 case SCTP_STATE_CLOSED: 323 case SCTP_STATE_CLOSED:
324 case SCTP_STATE_SHUTDOWN_PENDING: 324 case SCTP_STATE_SHUTDOWN_PENDING:
325 case SCTP_STATE_SHUTDOWN_SENT: 325 case SCTP_STATE_SHUTDOWN_SENT:
@@ -577,6 +577,13 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
577 * try to send as much as possible. 577 * try to send as much as possible.
578 */ 578 */
579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
580 /* If the chunk is abandoned, move it to abandoned list. */
581 if (sctp_chunk_abandoned(chunk)) {
582 list_del_init(&chunk->transmitted_list);
583 sctp_insert_list(&q->abandoned,
584 &chunk->transmitted_list);
585 continue;
586 }
580 587
581 /* Make sure that Gap Acked TSNs are not retransmitted. A 588 /* Make sure that Gap Acked TSNs are not retransmitted. A
582 * simple approach is just to move such TSNs out of the 589 * simple approach is just to move such TSNs out of the
@@ -618,9 +625,12 @@ redo:
618 625
619 /* If we are retransmitting, we should only 626 /* If we are retransmitting, we should only
620 * send a single packet. 627 * send a single packet.
628 * Otherwise, try appending this chunk again.
621 */ 629 */
622 if (rtx_timeout || fast_rtx) 630 if (rtx_timeout || fast_rtx)
623 done = 1; 631 done = 1;
632 else
633 goto redo;
624 634
625 /* Bundle next chunk in the next round. */ 635 /* Bundle next chunk in the next round. */
626 break; 636 break;
@@ -1683,8 +1693,9 @@ static void sctp_mark_missing(struct sctp_outq *q,
1683 /* SFR-CACC may require us to skip marking 1693 /* SFR-CACC may require us to skip marking
1684 * this chunk as missing. 1694 * this chunk as missing.
1685 */ 1695 */
1686 if (!transport || !sctp_cacc_skip(primary, transport, 1696 if (!transport || !sctp_cacc_skip(primary,
1687 count_of_newacks, tsn)) { 1697 chunk->transport,
1698 count_of_newacks, tsn)) {
1688 chunk->tsn_missing_report++; 1699 chunk->tsn_missing_report++;
1689 1700
1690 SCTP_DEBUG_PRINTK( 1701 SCTP_DEBUG_PRINTK(
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5bf91d04f63..69fbc55cf18e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -339,13 +339,12 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr,
339} 339}
340 340
341/* Initialize a sctp_addr from a dst_entry. */ 341/* Initialize a sctp_addr from a dst_entry. */
342static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, 342static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
343 __be16 port) 343 __be16 port)
344{ 344{
345 struct rtable *rt = (struct rtable *)dst;
346 saddr->v4.sin_family = AF_INET; 345 saddr->v4.sin_family = AF_INET;
347 saddr->v4.sin_port = port; 346 saddr->v4.sin_port = port;
348 saddr->v4.sin_addr.s_addr = rt->rt_src; 347 saddr->v4.sin_addr.s_addr = fl4->saddr;
349} 348}
350 349
351/* Compare two addresses exactly. */ 350/* Compare two addresses exactly. */
@@ -463,35 +462,36 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
463 * addresses. If an association is passed, trys to get a dst entry with a 462 * addresses. If an association is passed, trys to get a dst entry with a
464 * source address that matches an address in the bind address list. 463 * source address that matches an address in the bind address list.
465 */ 464 */
466static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, 465static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
467 union sctp_addr *daddr, 466 struct flowi *fl, struct sock *sk)
468 union sctp_addr *saddr)
469{ 467{
468 struct sctp_association *asoc = t->asoc;
470 struct rtable *rt; 469 struct rtable *rt;
471 struct flowi4 fl4; 470 struct flowi4 *fl4 = &fl->u.ip4;
472 struct sctp_bind_addr *bp; 471 struct sctp_bind_addr *bp;
473 struct sctp_sockaddr_entry *laddr; 472 struct sctp_sockaddr_entry *laddr;
474 struct dst_entry *dst = NULL; 473 struct dst_entry *dst = NULL;
474 union sctp_addr *daddr = &t->ipaddr;
475 union sctp_addr dst_saddr; 475 union sctp_addr dst_saddr;
476 476
477 memset(&fl4, 0x0, sizeof(struct flowi4)); 477 memset(fl4, 0x0, sizeof(struct flowi4));
478 fl4.daddr = daddr->v4.sin_addr.s_addr; 478 fl4->daddr = daddr->v4.sin_addr.s_addr;
479 fl4.fl4_dport = daddr->v4.sin_port; 479 fl4->fl4_dport = daddr->v4.sin_port;
480 fl4.flowi4_proto = IPPROTO_SCTP; 480 fl4->flowi4_proto = IPPROTO_SCTP;
481 if (asoc) { 481 if (asoc) {
482 fl4.flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); 482 fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
483 fl4.flowi4_oif = asoc->base.sk->sk_bound_dev_if; 483 fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
484 fl4.fl4_sport = htons(asoc->base.bind_addr.port); 484 fl4->fl4_sport = htons(asoc->base.bind_addr.port);
485 } 485 }
486 if (saddr) { 486 if (saddr) {
487 fl4.saddr = saddr->v4.sin_addr.s_addr; 487 fl4->saddr = saddr->v4.sin_addr.s_addr;
488 fl4.fl4_sport = saddr->v4.sin_port; 488 fl4->fl4_sport = saddr->v4.sin_port;
489 } 489 }
490 490
491 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 491 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
492 __func__, &fl4.daddr, &fl4.saddr); 492 __func__, &fl4->daddr, &fl4->saddr);
493 493
494 rt = ip_route_output_key(&init_net, &fl4); 494 rt = ip_route_output_key(&init_net, fl4);
495 if (!IS_ERR(rt)) 495 if (!IS_ERR(rt))
496 dst = &rt->dst; 496 dst = &rt->dst;
497 497
@@ -507,7 +507,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
507 /* Walk through the bind address list and look for a bind 507 /* Walk through the bind address list and look for a bind
508 * address that matches the source address of the returned dst. 508 * address that matches the source address of the returned dst.
509 */ 509 */
510 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 510 sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
511 rcu_read_lock(); 511 rcu_read_lock();
512 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 512 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
513 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 513 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
@@ -533,9 +533,9 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
533 continue; 533 continue;
534 if ((laddr->state == SCTP_ADDR_SRC) && 534 if ((laddr->state == SCTP_ADDR_SRC) &&
535 (AF_INET == laddr->a.sa.sa_family)) { 535 (AF_INET == laddr->a.sa.sa_family)) {
536 fl4.saddr = laddr->a.v4.sin_addr.s_addr; 536 fl4->saddr = laddr->a.v4.sin_addr.s_addr;
537 fl4.fl4_sport = laddr->a.v4.sin_port; 537 fl4->fl4_sport = laddr->a.v4.sin_port;
538 rt = ip_route_output_key(&init_net, &fl4); 538 rt = ip_route_output_key(&init_net, fl4);
539 if (!IS_ERR(rt)) { 539 if (!IS_ERR(rt)) {
540 dst = &rt->dst; 540 dst = &rt->dst;
541 goto out_unlock; 541 goto out_unlock;
@@ -546,32 +546,26 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
546out_unlock: 546out_unlock:
547 rcu_read_unlock(); 547 rcu_read_unlock();
548out: 548out:
549 t->dst = dst;
549 if (dst) 550 if (dst)
550 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n", 551 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n",
551 &rt->rt_dst, &rt->rt_src); 552 &fl4->daddr, &fl4->saddr);
552 else 553 else
553 SCTP_DEBUG_PRINTK("NO ROUTE\n"); 554 SCTP_DEBUG_PRINTK("NO ROUTE\n");
554
555 return dst;
556} 555}
557 556
558/* For v4, the source address is cached in the route entry(dst). So no need 557/* For v4, the source address is cached in the route entry(dst). So no need
559 * to cache it separately and hence this is an empty routine. 558 * to cache it separately and hence this is an empty routine.
560 */ 559 */
561static void sctp_v4_get_saddr(struct sctp_sock *sk, 560static void sctp_v4_get_saddr(struct sctp_sock *sk,
562 struct sctp_association *asoc, 561 struct sctp_transport *t,
563 struct dst_entry *dst, 562 struct flowi *fl)
564 union sctp_addr *daddr,
565 union sctp_addr *saddr)
566{ 563{
567 struct rtable *rt = (struct rtable *)dst; 564 union sctp_addr *saddr = &t->saddr;
568 565 struct rtable *rt = (struct rtable *)t->dst;
569 if (!asoc)
570 return;
571 566
572 if (rt) { 567 if (rt) {
573 saddr->v4.sin_family = AF_INET; 568 saddr->v4.sin_family = AF_INET;
574 saddr->v4.sin_port = htons(asoc->base.bind_addr.port);
575 saddr->v4.sin_addr.s_addr = rt->rt_src; 569 saddr->v4.sin_addr.s_addr = rt->rt_src;
576 } 570 }
577} 571}
@@ -950,7 +944,6 @@ static struct sctp_af sctp_af_inet = {
950 .to_sk_daddr = sctp_v4_to_sk_daddr, 944 .to_sk_daddr = sctp_v4_to_sk_daddr,
951 .from_addr_param = sctp_v4_from_addr_param, 945 .from_addr_param = sctp_v4_from_addr_param,
952 .to_addr_param = sctp_v4_to_addr_param, 946 .to_addr_param = sctp_v4_to_addr_param,
953 .dst_saddr = sctp_v4_dst_saddr,
954 .cmp_addr = sctp_v4_cmp_addr, 947 .cmp_addr = sctp_v4_cmp_addr,
955 .addr_valid = sctp_v4_addr_valid, 948 .addr_valid = sctp_v4_addr_valid,
956 .inaddr_any = sctp_v4_inaddr_any, 949 .inaddr_any = sctp_v4_inaddr_any,
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b3434cc7d0cf..58eb27fed4b4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1075,20 +1075,28 @@ nodata:
1075 1075
1076/* Make a HEARTBEAT chunk. */ 1076/* Make a HEARTBEAT chunk. */
1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1078 const struct sctp_transport *transport, 1078 const struct sctp_transport *transport)
1079 const void *payload, const size_t paylen)
1080{ 1079{
1081 struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 1080 struct sctp_chunk *retval;
1082 0, paylen); 1081 sctp_sender_hb_info_t hbinfo;
1082
1083 retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
1083 1084
1084 if (!retval) 1085 if (!retval)
1085 goto nodata; 1086 goto nodata;
1086 1087
1088 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
1089 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
1090 hbinfo.daddr = transport->ipaddr;
1091 hbinfo.sent_at = jiffies;
1092 hbinfo.hb_nonce = transport->hb_nonce;
1093
1087 /* Cast away the 'const', as this is just telling the chunk 1094 /* Cast away the 'const', as this is just telling the chunk
1088 * what transport it belongs to. 1095 * what transport it belongs to.
1089 */ 1096 */
1090 retval->transport = (struct sctp_transport *) transport; 1097 retval->transport = (struct sctp_transport *) transport;
1091 retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); 1098 retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
1099 &hbinfo);
1092 1100
1093nodata: 1101nodata:
1094 return retval; 1102 return retval;
@@ -2242,14 +2250,17 @@ int sctp_verify_init(const struct sctp_association *asoc,
2242 * Returns 0 on failure, else success. 2250 * Returns 0 on failure, else success.
2243 * FIXME: This is an association method. 2251 * FIXME: This is an association method.
2244 */ 2252 */
2245int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 2253int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2246 const union sctp_addr *peer_addr, 2254 const union sctp_addr *peer_addr,
2247 sctp_init_chunk_t *peer_init, gfp_t gfp) 2255 sctp_init_chunk_t *peer_init, gfp_t gfp)
2248{ 2256{
2249 union sctp_params param; 2257 union sctp_params param;
2250 struct sctp_transport *transport; 2258 struct sctp_transport *transport;
2251 struct list_head *pos, *temp; 2259 struct list_head *pos, *temp;
2260 struct sctp_af *af;
2261 union sctp_addr addr;
2252 char *cookie; 2262 char *cookie;
2263 int src_match = 0;
2253 2264
2254 /* We must include the address that the INIT packet came from. 2265 /* We must include the address that the INIT packet came from.
2255 * This is the only address that matters for an INIT packet. 2266 * This is the only address that matters for an INIT packet.
@@ -2261,18 +2272,31 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2261 * added as the primary transport. The source address seems to 2272 * added as the primary transport. The source address seems to
2262 * be a a better choice than any of the embedded addresses. 2273 * be a a better choice than any of the embedded addresses.
2263 */ 2274 */
2264 if (peer_addr) { 2275 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
2265 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) 2276 goto nomem;
2266 goto nomem; 2277
2267 } 2278 if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
2279 src_match = 1;
2268 2280
2269 /* Process the initialization parameters. */ 2281 /* Process the initialization parameters. */
2270 sctp_walk_params(param, peer_init, init_hdr.params) { 2282 sctp_walk_params(param, peer_init, init_hdr.params) {
2283 if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
2284 param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
2285 af = sctp_get_af_specific(param_type2af(param.p->type));
2286 af->from_addr_param(&addr, param.addr,
2287 chunk->sctp_hdr->source, 0);
2288 if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
2289 src_match = 1;
2290 }
2271 2291
2272 if (!sctp_process_param(asoc, param, peer_addr, gfp)) 2292 if (!sctp_process_param(asoc, param, peer_addr, gfp))
2273 goto clean_up; 2293 goto clean_up;
2274 } 2294 }
2275 2295
2296 /* source address of chunk may not match any valid address */
2297 if (!src_match)
2298 goto clean_up;
2299
2276 /* AUTH: After processing the parameters, make sure that we 2300 /* AUTH: After processing the parameters, make sure that we
2277 * have all the required info to potentially do authentications. 2301 * have all the required info to potentially do authentications.
2278 */ 2302 */
@@ -2923,7 +2947,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2923 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) 2947 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
2924 return SCTP_ERROR_UNKNOWN_PARAM; 2948 return SCTP_ERROR_UNKNOWN_PARAM;
2925 2949
2926 switch (addr_param->v4.param_hdr.type) { 2950 switch (addr_param->p.type) {
2927 case SCTP_PARAM_IPV6_ADDRESS: 2951 case SCTP_PARAM_IPV6_ADDRESS:
2928 if (!asoc->peer.ipv6_address) 2952 if (!asoc->peer.ipv6_address)
2929 return SCTP_ERROR_DNS_FAILED; 2953 return SCTP_ERROR_DNS_FAILED;
@@ -2936,7 +2960,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2936 return SCTP_ERROR_DNS_FAILED; 2960 return SCTP_ERROR_DNS_FAILED;
2937 } 2961 }
2938 2962
2939 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2963 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2940 if (unlikely(!af)) 2964 if (unlikely(!af))
2941 return SCTP_ERROR_DNS_FAILED; 2965 return SCTP_ERROR_DNS_FAILED;
2942 2966
@@ -3100,7 +3124,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3100 /* Skip the address parameter and store a pointer to the first 3124 /* Skip the address parameter and store a pointer to the first
3101 * asconf parameter. 3125 * asconf parameter.
3102 */ 3126 */
3103 length = ntohs(addr_param->v4.param_hdr.length); 3127 length = ntohs(addr_param->p.length);
3104 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3128 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3105 chunk_len -= length; 3129 chunk_len -= length;
3106 3130
@@ -3177,7 +3201,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3177 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 3201 ((void *)asconf_param + sizeof(sctp_addip_param_t));
3178 3202
3179 /* We have checked the packet before, so we do not check again. */ 3203 /* We have checked the packet before, so we do not check again. */
3180 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 3204 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
3181 af->from_addr_param(&addr, addr_param, htons(bp->port), 0); 3205 af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
3182 3206
3183 switch (asconf_param->param_hdr.type) { 3207 switch (asconf_param->param_hdr.type) {
@@ -3193,11 +3217,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3193 local_bh_enable(); 3217 local_bh_enable();
3194 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3218 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3195 transports) { 3219 transports) {
3196 if (transport->state == SCTP_ACTIVE)
3197 continue;
3198 dst_release(transport->dst); 3220 dst_release(transport->dst);
3199 sctp_transport_route(transport, NULL, 3221 transport->dst = NULL;
3200 sctp_sk(asoc->base.sk));
3201 } 3222 }
3202 break; 3223 break;
3203 case SCTP_PARAM_DEL_IP: 3224 case SCTP_PARAM_DEL_IP:
@@ -3207,8 +3228,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3207 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3228 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3208 transports) { 3229 transports) {
3209 dst_release(transport->dst); 3230 dst_release(transport->dst);
3210 sctp_transport_route(transport, NULL, 3231 transport->dst = NULL;
3211 sctp_sk(asoc->base.sk));
3212 } 3232 }
3213 break; 3233 break;
3214 default: 3234 default:
@@ -3304,7 +3324,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3304 /* Skip the address parameter in the last asconf sent and store a 3324 /* Skip the address parameter in the last asconf sent and store a
3305 * pointer to the first asconf parameter. 3325 * pointer to the first asconf parameter.
3306 */ 3326 */
3307 length = ntohs(addr_param->v4.param_hdr.length); 3327 length = ntohs(addr_param->p.length);
3308 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3328 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3309 asconf_len -= length; 3329 asconf_len -= length;
3310 3330
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 5f86ee4b54c1..d612ca1ca6c0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -595,8 +595,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
595 * fail during INIT processing (due to malloc problems), 595 * fail during INIT processing (due to malloc problems),
596 * just return the error and stop processing the stack. 596 * just return the error and stop processing the stack.
597 */ 597 */
598 if (!sctp_process_init(asoc, chunk->chunk_hdr->type, 598 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
599 sctp_source(chunk), peer_init, gfp))
600 error = -ENOMEM; 599 error = -ENOMEM;
601 else 600 else
602 error = 0; 601 error = 0;
@@ -1415,12 +1414,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1415 SCTP_RTXR_T3_RTX); 1414 SCTP_RTXR_T3_RTX);
1416 break; 1415 break;
1417 1416
1418 case SCTP_CMD_TRANSMIT:
1419 /* Kick start transmission. */
1420 error = sctp_outq_uncork(&asoc->outqueue);
1421 local_cork = 0;
1422 break;
1423
1424 case SCTP_CMD_ECN_CE: 1417 case SCTP_CMD_ECN_CE:
1425 /* Do delayed CE processing. */ 1418 /* Do delayed CE processing. */
1426 sctp_do_ecn_ce_work(asoc, cmd->obj.u32); 1419 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 76792083c379..7f4a4f8368ee 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -393,8 +393,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
393 goto nomem_init; 393 goto nomem_init;
394 394
395 /* The call, sctp_process_init(), can fail on memory allocation. */ 395 /* The call, sctp_process_init(), can fail on memory allocation. */
396 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 396 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
397 sctp_source(chunk),
398 (sctp_init_chunk_t *)chunk->chunk_hdr, 397 (sctp_init_chunk_t *)chunk->chunk_hdr,
399 GFP_ATOMIC)) 398 GFP_ATOMIC))
400 goto nomem_init; 399 goto nomem_init;
@@ -725,7 +724,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
725 */ 724 */
726 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 725 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
727 726
728 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 727 if (!sctp_process_init(new_asoc, chunk,
729 &chunk->subh.cookie_hdr->c.peer_addr, 728 &chunk->subh.cookie_hdr->c.peer_addr,
730 peer_init, GFP_ATOMIC)) 729 peer_init, GFP_ATOMIC))
731 goto nomem_init; 730 goto nomem_init;
@@ -942,18 +941,9 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
942{ 941{
943 struct sctp_transport *transport = (struct sctp_transport *) arg; 942 struct sctp_transport *transport = (struct sctp_transport *) arg;
944 struct sctp_chunk *reply; 943 struct sctp_chunk *reply;
945 sctp_sender_hb_info_t hbinfo;
946 size_t paylen = 0;
947
948 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
949 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
950 hbinfo.daddr = transport->ipaddr;
951 hbinfo.sent_at = jiffies;
952 hbinfo.hb_nonce = transport->hb_nonce;
953 944
954 /* Send a heartbeat to our peer. */ 945 /* Send a heartbeat to our peer. */
955 paylen = sizeof(sctp_sender_hb_info_t); 946 reply = sctp_make_heartbeat(asoc, transport);
956 reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
957 if (!reply) 947 if (!reply)
958 return SCTP_DISPOSITION_NOMEM; 948 return SCTP_DISPOSITION_NOMEM;
959 949
@@ -1464,8 +1454,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1464 * Verification Tag and Peers Verification tag into a reserved 1454 * Verification Tag and Peers Verification tag into a reserved
1465 * place (local tie-tag and per tie-tag) within the state cookie. 1455 * place (local tie-tag and per tie-tag) within the state cookie.
1466 */ 1456 */
1467 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1457 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
1468 sctp_source(chunk),
1469 (sctp_init_chunk_t *)chunk->chunk_hdr, 1458 (sctp_init_chunk_t *)chunk->chunk_hdr,
1470 GFP_ATOMIC)) 1459 GFP_ATOMIC))
1471 goto nomem; 1460 goto nomem;
@@ -1694,8 +1683,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1694 */ 1683 */
1695 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1684 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1696 1685
1697 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1686 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1698 sctp_source(chunk), peer_init,
1699 GFP_ATOMIC)) 1687 GFP_ATOMIC))
1700 goto nomem; 1688 goto nomem;
1701 1689
@@ -1780,8 +1768,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1780 * side effects--it is safe to run them here. 1768 * side effects--it is safe to run them here.
1781 */ 1769 */
1782 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1770 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1783 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1771 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1784 sctp_source(chunk), peer_init,
1785 GFP_ATOMIC)) 1772 GFP_ATOMIC))
1786 goto nomem; 1773 goto nomem;
1787 1774
@@ -2412,8 +2399,15 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2412 2399
2413 /* See if we have an error cause code in the chunk. */ 2400 /* See if we have an error cause code in the chunk. */
2414 len = ntohs(chunk->chunk_hdr->length); 2401 len = ntohs(chunk->chunk_hdr->length);
2415 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2402 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2403
2404 sctp_errhdr_t *err;
2405 sctp_walk_errors(err, chunk->chunk_hdr);
2406 if ((void *)err != (void *)chunk->chunk_end)
2407 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2408
2416 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2409 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2410 }
2417 2411
2418 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2412 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2419 /* ASSOC_FAILED will DELETE_TCB. */ 2413 /* ASSOC_FAILED will DELETE_TCB. */
@@ -3204,6 +3198,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3204 sctp_cmd_seq_t *commands) 3198 sctp_cmd_seq_t *commands)
3205{ 3199{
3206 struct sctp_chunk *chunk = arg; 3200 struct sctp_chunk *chunk = arg;
3201 sctp_errhdr_t *err;
3207 3202
3208 if (!sctp_vtag_verify(chunk, asoc)) 3203 if (!sctp_vtag_verify(chunk, asoc))
3209 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3204 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3212,6 +3207,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3212 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 3207 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
3213 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3208 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3214 commands); 3209 commands);
3210 sctp_walk_errors(err, chunk->chunk_hdr);
3211 if ((void *)err != (void *)chunk->chunk_end)
3212 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3213 (void *)err, commands);
3215 3214
3216 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3215 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3217 SCTP_CHUNK(chunk)); 3216 SCTP_CHUNK(chunk));
@@ -3320,8 +3319,10 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3320 struct sctp_chunk *chunk = arg; 3319 struct sctp_chunk *chunk = arg;
3321 struct sk_buff *skb = chunk->skb; 3320 struct sk_buff *skb = chunk->skb;
3322 sctp_chunkhdr_t *ch; 3321 sctp_chunkhdr_t *ch;
3322 sctp_errhdr_t *err;
3323 __u8 *ch_end; 3323 __u8 *ch_end;
3324 int ootb_shut_ack = 0; 3324 int ootb_shut_ack = 0;
3325 int ootb_cookie_ack = 0;
3325 3326
3326 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3327 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
3327 3328
@@ -3346,6 +3347,23 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3346 if (SCTP_CID_ABORT == ch->type) 3347 if (SCTP_CID_ABORT == ch->type)
3347 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3348 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3348 3349
3350 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
3351 * or a COOKIE ACK the SCTP Packet should be silently
3352 * discarded.
3353 */
3354
3355 if (SCTP_CID_COOKIE_ACK == ch->type)
3356 ootb_cookie_ack = 1;
3357
3358 if (SCTP_CID_ERROR == ch->type) {
3359 sctp_walk_errors(err, ch) {
3360 if (SCTP_ERROR_STALE_COOKIE == err->cause) {
3361 ootb_cookie_ack = 1;
3362 break;
3363 }
3364 }
3365 }
3366
3349 /* Report violation if chunk len overflows */ 3367 /* Report violation if chunk len overflows */
3350 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3368 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3351 if (ch_end > skb_tail_pointer(skb)) 3369 if (ch_end > skb_tail_pointer(skb))
@@ -3357,6 +3375,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3357 3375
3358 if (ootb_shut_ack) 3376 if (ootb_shut_ack)
3359 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3377 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3378 else if (ootb_cookie_ack)
3379 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3360 else 3380 else
3361 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3381 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3362} 3382}
@@ -4343,8 +4363,9 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4343 4363
4344/* 4364/*
4345 * Handle a protocol violation when the parameter length is invalid. 4365 * Handle a protocol violation when the parameter length is invalid.
4346 * "Invalid" length is identified as smaller than the minimal length a 4366 * If the length is smaller than the minimum length of a given parameter,
4347 * given parameter can be. 4367 * or accumulated length in multi parameters exceeds the end of the chunk,
4368 * the length is considered as invalid.
4348 */ 4369 */
4349static sctp_disposition_t sctp_sf_violation_paramlen( 4370static sctp_disposition_t sctp_sf_violation_paramlen(
4350 const struct sctp_endpoint *ep, 4371 const struct sctp_endpoint *ep,
@@ -5056,6 +5077,30 @@ sctp_disposition_t sctp_sf_ignore_primitive(
5056 ***************************************************************************/ 5077 ***************************************************************************/
5057 5078
5058/* 5079/*
5080 * When the SCTP stack has no more user data to send or retransmit, this
5081 * notification is given to the user. Also, at the time when a user app
5082 * subscribes to this event, if there is no data to be sent or
5083 * retransmit, the stack will immediately send up this notification.
5084 */
5085sctp_disposition_t sctp_sf_do_no_pending_tsn(
5086 const struct sctp_endpoint *ep,
5087 const struct sctp_association *asoc,
5088 const sctp_subtype_t type,
5089 void *arg,
5090 sctp_cmd_seq_t *commands)
5091{
5092 struct sctp_ulpevent *event;
5093
5094 event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
5095 if (!event)
5096 return SCTP_DISPOSITION_NOMEM;
5097
5098 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
5099
5100 return SCTP_DISPOSITION_CONSUME;
5101}
5102
5103/*
5059 * Start the shutdown negotiation. 5104 * Start the shutdown negotiation.
5060 * 5105 *
5061 * From Section 9.2: 5106 * From Section 9.2:
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 546d4387fb3c..0338dc6fdc9d 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -107,8 +107,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} 107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
108 108
109#define TYPE_SCTP_DATA { \ 109#define TYPE_SCTP_DATA { \
110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 110 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 112 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -128,8 +126,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
128} /* TYPE_SCTP_DATA */ 126} /* TYPE_SCTP_DATA */
129 127
130#define TYPE_SCTP_INIT { \ 128#define TYPE_SCTP_INIT { \
131 /* SCTP_STATE_EMPTY */ \
132 TYPE_SCTP_FUNC(sctp_sf_bug), \
133 /* SCTP_STATE_CLOSED */ \ 129 /* SCTP_STATE_CLOSED */ \
134 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ 130 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
135 /* SCTP_STATE_COOKIE_WAIT */ \ 131 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -149,8 +145,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
149} /* TYPE_SCTP_INIT */ 145} /* TYPE_SCTP_INIT */
150 146
151#define TYPE_SCTP_INIT_ACK { \ 147#define TYPE_SCTP_INIT_ACK { \
152 /* SCTP_STATE_EMPTY */ \
153 TYPE_SCTP_FUNC(sctp_sf_ootb), \
154 /* SCTP_STATE_CLOSED */ \ 148 /* SCTP_STATE_CLOSED */ \
155 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \ 149 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
156 /* SCTP_STATE_COOKIE_WAIT */ \ 150 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -170,8 +164,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
170} /* TYPE_SCTP_INIT_ACK */ 164} /* TYPE_SCTP_INIT_ACK */
171 165
172#define TYPE_SCTP_SACK { \ 166#define TYPE_SCTP_SACK { \
173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 167 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 168 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 169 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -191,8 +183,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
191} /* TYPE_SCTP_SACK */ 183} /* TYPE_SCTP_SACK */
192 184
193#define TYPE_SCTP_HEARTBEAT { \ 185#define TYPE_SCTP_HEARTBEAT { \
194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 186 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 187 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 188 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -213,8 +203,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
213} /* TYPE_SCTP_HEARTBEAT */ 203} /* TYPE_SCTP_HEARTBEAT */
214 204
215#define TYPE_SCTP_HEARTBEAT_ACK { \ 205#define TYPE_SCTP_HEARTBEAT_ACK { \
216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 206 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 207 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 208 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -234,8 +222,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
234} /* TYPE_SCTP_HEARTBEAT_ACK */ 222} /* TYPE_SCTP_HEARTBEAT_ACK */
235 223
236#define TYPE_SCTP_ABORT { \ 224#define TYPE_SCTP_ABORT { \
237 /* SCTP_STATE_EMPTY */ \
238 TYPE_SCTP_FUNC(sctp_sf_ootb), \
239 /* SCTP_STATE_CLOSED */ \ 225 /* SCTP_STATE_CLOSED */ \
240 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ 226 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
241 /* SCTP_STATE_COOKIE_WAIT */ \ 227 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -255,8 +241,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
255} /* TYPE_SCTP_ABORT */ 241} /* TYPE_SCTP_ABORT */
256 242
257#define TYPE_SCTP_SHUTDOWN { \ 243#define TYPE_SCTP_SHUTDOWN { \
258 /* SCTP_STATE_EMPTY */ \
259 TYPE_SCTP_FUNC(sctp_sf_ootb), \
260 /* SCTP_STATE_CLOSED */ \ 244 /* SCTP_STATE_CLOSED */ \
261 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 245 TYPE_SCTP_FUNC(sctp_sf_ootb), \
262 /* SCTP_STATE_COOKIE_WAIT */ \ 246 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -276,8 +260,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
276} /* TYPE_SCTP_SHUTDOWN */ 260} /* TYPE_SCTP_SHUTDOWN */
277 261
278#define TYPE_SCTP_SHUTDOWN_ACK { \ 262#define TYPE_SCTP_SHUTDOWN_ACK { \
279 /* SCTP_STATE_EMPTY */ \
280 TYPE_SCTP_FUNC(sctp_sf_ootb), \
281 /* SCTP_STATE_CLOSED */ \ 263 /* SCTP_STATE_CLOSED */ \
282 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 264 TYPE_SCTP_FUNC(sctp_sf_ootb), \
283 /* SCTP_STATE_COOKIE_WAIT */ \ 265 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -297,8 +279,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
297} /* TYPE_SCTP_SHUTDOWN_ACK */ 279} /* TYPE_SCTP_SHUTDOWN_ACK */
298 280
299#define TYPE_SCTP_ERROR { \ 281#define TYPE_SCTP_ERROR { \
300 /* SCTP_STATE_EMPTY */ \
301 TYPE_SCTP_FUNC(sctp_sf_ootb), \
302 /* SCTP_STATE_CLOSED */ \ 282 /* SCTP_STATE_CLOSED */ \
303 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 283 TYPE_SCTP_FUNC(sctp_sf_ootb), \
304 /* SCTP_STATE_COOKIE_WAIT */ \ 284 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -318,8 +298,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
318} /* TYPE_SCTP_ERROR */ 298} /* TYPE_SCTP_ERROR */
319 299
320#define TYPE_SCTP_COOKIE_ECHO { \ 300#define TYPE_SCTP_COOKIE_ECHO { \
321 /* SCTP_STATE_EMPTY */ \
322 TYPE_SCTP_FUNC(sctp_sf_bug), \
323 /* SCTP_STATE_CLOSED */ \ 301 /* SCTP_STATE_CLOSED */ \
324 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ 302 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
325 /* SCTP_STATE_COOKIE_WAIT */ \ 303 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -339,8 +317,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
339} /* TYPE_SCTP_COOKIE_ECHO */ 317} /* TYPE_SCTP_COOKIE_ECHO */
340 318
341#define TYPE_SCTP_COOKIE_ACK { \ 319#define TYPE_SCTP_COOKIE_ACK { \
342 /* SCTP_STATE_EMPTY */ \
343 TYPE_SCTP_FUNC(sctp_sf_ootb), \
344 /* SCTP_STATE_CLOSED */ \ 320 /* SCTP_STATE_CLOSED */ \
345 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 321 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
346 /* SCTP_STATE_COOKIE_WAIT */ \ 322 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -360,8 +336,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
360} /* TYPE_SCTP_COOKIE_ACK */ 336} /* TYPE_SCTP_COOKIE_ACK */
361 337
362#define TYPE_SCTP_ECN_ECNE { \ 338#define TYPE_SCTP_ECN_ECNE { \
363 /* SCTP_STATE_EMPTY */ \
364 TYPE_SCTP_FUNC(sctp_sf_ootb), \
365 /* SCTP_STATE_CLOSED */ \ 339 /* SCTP_STATE_CLOSED */ \
366 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 340 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
367 /* SCTP_STATE_COOKIE_WAIT */ \ 341 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -381,8 +355,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
381} /* TYPE_SCTP_ECN_ECNE */ 355} /* TYPE_SCTP_ECN_ECNE */
382 356
383#define TYPE_SCTP_ECN_CWR { \ 357#define TYPE_SCTP_ECN_CWR { \
384 /* SCTP_STATE_EMPTY */ \
385 TYPE_SCTP_FUNC(sctp_sf_ootb), \
386 /* SCTP_STATE_CLOSED */ \ 358 /* SCTP_STATE_CLOSED */ \
387 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 359 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
388 /* SCTP_STATE_COOKIE_WAIT */ \ 360 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -402,8 +374,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
402} /* TYPE_SCTP_ECN_CWR */ 374} /* TYPE_SCTP_ECN_CWR */
403 375
404#define TYPE_SCTP_SHUTDOWN_COMPLETE { \ 376#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
405 /* SCTP_STATE_EMPTY */ \
406 TYPE_SCTP_FUNC(sctp_sf_ootb), \
407 /* SCTP_STATE_CLOSED */ \ 377 /* SCTP_STATE_CLOSED */ \
408 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 378 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
409 /* SCTP_STATE_COOKIE_WAIT */ \ 379 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -446,8 +416,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
446}; /* state_fn_t chunk_event_table[][] */ 416}; /* state_fn_t chunk_event_table[][] */
447 417
448#define TYPE_SCTP_ASCONF { \ 418#define TYPE_SCTP_ASCONF { \
449 /* SCTP_STATE_EMPTY */ \
450 TYPE_SCTP_FUNC(sctp_sf_ootb), \
451 /* SCTP_STATE_CLOSED */ \ 419 /* SCTP_STATE_CLOSED */ \
452 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 420 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
453 /* SCTP_STATE_COOKIE_WAIT */ \ 421 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -467,8 +435,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
467} /* TYPE_SCTP_ASCONF */ 435} /* TYPE_SCTP_ASCONF */
468 436
469#define TYPE_SCTP_ASCONF_ACK { \ 437#define TYPE_SCTP_ASCONF_ACK { \
470 /* SCTP_STATE_EMPTY */ \
471 TYPE_SCTP_FUNC(sctp_sf_ootb), \
472 /* SCTP_STATE_CLOSED */ \ 438 /* SCTP_STATE_CLOSED */ \
473 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 439 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
474 /* SCTP_STATE_COOKIE_WAIT */ \ 440 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -496,8 +462,6 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
496}; /*state_fn_t addip_chunk_event_table[][] */ 462}; /*state_fn_t addip_chunk_event_table[][] */
497 463
498#define TYPE_SCTP_FWD_TSN { \ 464#define TYPE_SCTP_FWD_TSN { \
499 /* SCTP_STATE_EMPTY */ \
500 TYPE_SCTP_FUNC(sctp_sf_ootb), \
501 /* SCTP_STATE_CLOSED */ \ 465 /* SCTP_STATE_CLOSED */ \
502 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 466 TYPE_SCTP_FUNC(sctp_sf_ootb), \
503 /* SCTP_STATE_COOKIE_WAIT */ \ 467 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -524,8 +488,6 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN
524}; /*state_fn_t prsctp_chunk_event_table[][] */ 488}; /*state_fn_t prsctp_chunk_event_table[][] */
525 489
526#define TYPE_SCTP_AUTH { \ 490#define TYPE_SCTP_AUTH { \
527 /* SCTP_STATE_EMPTY */ \
528 TYPE_SCTP_FUNC(sctp_sf_ootb), \
529 /* SCTP_STATE_CLOSED */ \ 491 /* SCTP_STATE_CLOSED */ \
530 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 492 TYPE_SCTP_FUNC(sctp_sf_ootb), \
531 /* SCTP_STATE_COOKIE_WAIT */ \ 493 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -553,8 +515,6 @@ static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TY
553 515
554static const sctp_sm_table_entry_t 516static const sctp_sm_table_entry_t
555chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { 517chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
556 /* SCTP_STATE_EMPTY */
557 TYPE_SCTP_FUNC(sctp_sf_ootb),
558 /* SCTP_STATE_CLOSED */ 518 /* SCTP_STATE_CLOSED */
559 TYPE_SCTP_FUNC(sctp_sf_ootb), 519 TYPE_SCTP_FUNC(sctp_sf_ootb),
560 /* SCTP_STATE_COOKIE_WAIT */ 520 /* SCTP_STATE_COOKIE_WAIT */
@@ -575,8 +535,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
575 535
576 536
577#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ 537#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \
578 /* SCTP_STATE_EMPTY */ \
579 TYPE_SCTP_FUNC(sctp_sf_bug), \
580 /* SCTP_STATE_CLOSED */ \ 538 /* SCTP_STATE_CLOSED */ \
581 TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \ 539 TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
582 /* SCTP_STATE_COOKIE_WAIT */ \ 540 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -596,8 +554,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
596} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ 554} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
597 555
598#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ 556#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \
599 /* SCTP_STATE_EMPTY */ \
600 TYPE_SCTP_FUNC(sctp_sf_bug), \
601 /* SCTP_STATE_CLOSED */ \ 557 /* SCTP_STATE_CLOSED */ \
602 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 558 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
603 /* SCTP_STATE_COOKIE_WAIT */ \ 559 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -617,8 +573,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
617} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ 573} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
618 574
619#define TYPE_SCTP_PRIMITIVE_ABORT { \ 575#define TYPE_SCTP_PRIMITIVE_ABORT { \
620 /* SCTP_STATE_EMPTY */ \
621 TYPE_SCTP_FUNC(sctp_sf_bug), \
622 /* SCTP_STATE_CLOSED */ \ 576 /* SCTP_STATE_CLOSED */ \
623 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 577 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
624 /* SCTP_STATE_COOKIE_WAIT */ \ 578 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -638,8 +592,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
638} /* TYPE_SCTP_PRIMITIVE_ABORT */ 592} /* TYPE_SCTP_PRIMITIVE_ABORT */
639 593
640#define TYPE_SCTP_PRIMITIVE_SEND { \ 594#define TYPE_SCTP_PRIMITIVE_SEND { \
641 /* SCTP_STATE_EMPTY */ \
642 TYPE_SCTP_FUNC(sctp_sf_bug), \
643 /* SCTP_STATE_CLOSED */ \ 595 /* SCTP_STATE_CLOSED */ \
644 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 596 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
645 /* SCTP_STATE_COOKIE_WAIT */ \ 597 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -659,8 +611,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
659} /* TYPE_SCTP_PRIMITIVE_SEND */ 611} /* TYPE_SCTP_PRIMITIVE_SEND */
660 612
661#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ 613#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \
662 /* SCTP_STATE_EMPTY */ \
663 TYPE_SCTP_FUNC(sctp_sf_bug), \
664 /* SCTP_STATE_CLOSED */ \ 614 /* SCTP_STATE_CLOSED */ \
665 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 615 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
666 /* SCTP_STATE_COOKIE_WAIT */ \ 616 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -680,8 +630,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
680} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ 630} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
681 631
682#define TYPE_SCTP_PRIMITIVE_ASCONF { \ 632#define TYPE_SCTP_PRIMITIVE_ASCONF { \
683 /* SCTP_STATE_EMPTY */ \
684 TYPE_SCTP_FUNC(sctp_sf_bug), \
685 /* SCTP_STATE_CLOSED */ \ 633 /* SCTP_STATE_CLOSED */ \
686 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 634 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
687 /* SCTP_STATE_COOKIE_WAIT */ \ 635 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -713,8 +661,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
713}; 661};
714 662
715#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ 663#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
716 /* SCTP_STATE_EMPTY */ \
717 TYPE_SCTP_FUNC(sctp_sf_bug), \
718 /* SCTP_STATE_CLOSED */ \ 664 /* SCTP_STATE_CLOSED */ \
719 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 665 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
720 /* SCTP_STATE_COOKIE_WAIT */ \ 666 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -722,7 +668,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
722 /* SCTP_STATE_COOKIE_ECHOED */ \ 668 /* SCTP_STATE_COOKIE_ECHOED */ \
723 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 669 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
724 /* SCTP_STATE_ESTABLISHED */ \ 670 /* SCTP_STATE_ESTABLISHED */ \
725 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 671 TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \
726 /* SCTP_STATE_SHUTDOWN_PENDING */ \ 672 /* SCTP_STATE_SHUTDOWN_PENDING */ \
727 TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \ 673 TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
728 /* SCTP_STATE_SHUTDOWN_SENT */ \ 674 /* SCTP_STATE_SHUTDOWN_SENT */ \
@@ -734,8 +680,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
734} 680}
735 681
736#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ 682#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \
737 /* SCTP_STATE_EMPTY */ \
738 TYPE_SCTP_FUNC(sctp_sf_bug), \
739 /* SCTP_STATE_CLOSED */ \ 683 /* SCTP_STATE_CLOSED */ \
740 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 684 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
741 /* SCTP_STATE_COOKIE_WAIT */ \ 685 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -760,8 +704,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
760}; 704};
761 705
762#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ 706#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
763 /* SCTP_STATE_EMPTY */ \
764 TYPE_SCTP_FUNC(sctp_sf_bug), \
765 /* SCTP_STATE_CLOSED */ \ 707 /* SCTP_STATE_CLOSED */ \
766 TYPE_SCTP_FUNC(sctp_sf_bug), \ 708 TYPE_SCTP_FUNC(sctp_sf_bug), \
767 /* SCTP_STATE_COOKIE_WAIT */ \ 709 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -781,8 +723,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
781} 723}
782 724
783#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ 725#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
784 /* SCTP_STATE_EMPTY */ \
785 TYPE_SCTP_FUNC(sctp_sf_bug), \
786 /* SCTP_STATE_CLOSED */ \ 726 /* SCTP_STATE_CLOSED */ \
787 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 727 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
788 /* SCTP_STATE_COOKIE_WAIT */ \ 728 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -802,8 +742,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
802} 742}
803 743
804#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ 744#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
805 /* SCTP_STATE_EMPTY */ \
806 TYPE_SCTP_FUNC(sctp_sf_bug), \
807 /* SCTP_STATE_CLOSED */ \ 745 /* SCTP_STATE_CLOSED */ \
808 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 746 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
809 /* SCTP_STATE_COOKIE_WAIT */ \ 747 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -823,8 +761,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
823} 761}
824 762
825#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ 763#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
826 /* SCTP_STATE_EMPTY */ \
827 TYPE_SCTP_FUNC(sctp_sf_bug), \
828 /* SCTP_STATE_CLOSED */ \ 764 /* SCTP_STATE_CLOSED */ \
829 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 765 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
830 /* SCTP_STATE_COOKIE_WAIT */ \ 766 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -844,8 +780,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
844} 780}
845 781
846#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ 782#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
847 /* SCTP_STATE_EMPTY */ \
848 TYPE_SCTP_FUNC(sctp_sf_bug), \
849 /* SCTP_STATE_CLOSED */ \ 783 /* SCTP_STATE_CLOSED */ \
850 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 784 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
851 /* SCTP_STATE_COOKIE_WAIT */ \ 785 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -865,8 +799,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
865} 799}
866 800
867#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ 801#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
868 /* SCTP_STATE_EMPTY */ \
869 TYPE_SCTP_FUNC(sctp_sf_bug), \
870 /* SCTP_STATE_CLOSED */ \ 802 /* SCTP_STATE_CLOSED */ \
871 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 803 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
872 /* SCTP_STATE_COOKIE_WAIT */ \ 804 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -886,8 +818,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
886} 818}
887 819
888#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ 820#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
889 /* SCTP_STATE_EMPTY */ \
890 TYPE_SCTP_FUNC(sctp_sf_bug), \
891 /* SCTP_STATE_CLOSED */ \ 821 /* SCTP_STATE_CLOSED */ \
892 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 822 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
893 /* SCTP_STATE_COOKIE_WAIT */ \ 823 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -907,8 +837,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
907} 837}
908 838
909#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ 839#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
910 /* SCTP_STATE_EMPTY */ \
911 TYPE_SCTP_FUNC(sctp_sf_bug), \
912 /* SCTP_STATE_CLOSED */ \ 840 /* SCTP_STATE_CLOSED */ \
913 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 841 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
914 /* SCTP_STATE_COOKIE_WAIT */ \ 842 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -928,8 +856,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
928} 856}
929 857
930#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ 858#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
931 /* SCTP_STATE_EMPTY */ \
932 TYPE_SCTP_FUNC(sctp_sf_bug), \
933 /* SCTP_STATE_CLOSED */ \ 859 /* SCTP_STATE_CLOSED */ \
934 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 860 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
935 /* SCTP_STATE_COOKIE_WAIT */ \ 861 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -949,8 +875,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
949} 875}
950 876
951#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ 877#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
952 /* SCTP_STATE_EMPTY */ \
953 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
954 /* SCTP_STATE_CLOSED */ \ 878 /* SCTP_STATE_CLOSED */ \
955 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 879 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
956 /* SCTP_STATE_COOKIE_WAIT */ \ 880 /* SCTP_STATE_COOKIE_WAIT */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index deb82e35a107..33d9ee629b4e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -658,11 +658,15 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
658 goto err_bindx_rem; 658 goto err_bindx_rem;
659 } 659 }
660 660
661 if (sa_addr->v4.sin_port != htons(bp->port)) { 661 if (sa_addr->v4.sin_port &&
662 sa_addr->v4.sin_port != htons(bp->port)) {
662 retval = -EINVAL; 663 retval = -EINVAL;
663 goto err_bindx_rem; 664 goto err_bindx_rem;
664 } 665 }
665 666
667 if (!sa_addr->v4.sin_port)
668 sa_addr->v4.sin_port = htons(bp->port);
669
666 /* FIXME - There is probably a need to check if sk->sk_saddr and 670 /* FIXME - There is probably a need to check if sk->sk_saddr and
667 * sk->sk_rcv_addr are currently set to one of the addresses to 671 * sk->sk_rcv_addr are currently set to one of the addresses to
668 * be removed. This is something which needs to be looked into 672 * be removed. This is something which needs to be looked into
@@ -2283,7 +2287,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2283 trans->param_flags = 2287 trans->param_flags =
2284 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2288 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2285 if (update) { 2289 if (update) {
2286 sctp_transport_pmtu(trans); 2290 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2287 sctp_assoc_sync_pmtu(asoc); 2291 sctp_assoc_sync_pmtu(asoc);
2288 } 2292 }
2289 } else if (asoc) { 2293 } else if (asoc) {
@@ -3215,14 +3219,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3215 if (optlen < sizeof(struct sctp_hmacalgo)) 3219 if (optlen < sizeof(struct sctp_hmacalgo))
3216 return -EINVAL; 3220 return -EINVAL;
3217 3221
3218 hmacs = kmalloc(optlen, GFP_KERNEL); 3222 hmacs= memdup_user(optval, optlen);
3219 if (!hmacs) 3223 if (IS_ERR(hmacs))
3220 return -ENOMEM; 3224 return PTR_ERR(hmacs);
3221
3222 if (copy_from_user(hmacs, optval, optlen)) {
3223 err = -EFAULT;
3224 goto out;
3225 }
3226 3225
3227 idents = hmacs->shmac_num_idents; 3226 idents = hmacs->shmac_num_idents;
3228 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3227 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
@@ -3257,14 +3256,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3257 if (optlen <= sizeof(struct sctp_authkey)) 3256 if (optlen <= sizeof(struct sctp_authkey))
3258 return -EINVAL; 3257 return -EINVAL;
3259 3258
3260 authkey = kmalloc(optlen, GFP_KERNEL); 3259 authkey= memdup_user(optval, optlen);
3261 if (!authkey) 3260 if (IS_ERR(authkey))
3262 return -ENOMEM; 3261 return PTR_ERR(authkey);
3263
3264 if (copy_from_user(authkey, optval, optlen)) {
3265 ret = -EFAULT;
3266 goto out;
3267 }
3268 3262
3269 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3263 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3270 ret = -EINVAL; 3264 ret = -EINVAL;
@@ -5283,6 +5277,55 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5283 return 0; 5277 return 0;
5284} 5278}
5285 5279
5280/*
5281 * 8.2.6. Get the Current Identifiers of Associations
5282 * (SCTP_GET_ASSOC_ID_LIST)
5283 *
5284 * This option gets the current list of SCTP association identifiers of
5285 * the SCTP associations handled by a one-to-many style socket.
5286 */
5287static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5288 char __user *optval, int __user *optlen)
5289{
5290 struct sctp_sock *sp = sctp_sk(sk);
5291 struct sctp_association *asoc;
5292 struct sctp_assoc_ids *ids;
5293 u32 num = 0;
5294
5295 if (sctp_style(sk, TCP))
5296 return -EOPNOTSUPP;
5297
5298 if (len < sizeof(struct sctp_assoc_ids))
5299 return -EINVAL;
5300
5301 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5302 num++;
5303 }
5304
5305 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
5306 return -EINVAL;
5307
5308 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
5309
5310 ids = kmalloc(len, GFP_KERNEL);
5311 if (unlikely(!ids))
5312 return -ENOMEM;
5313
5314 ids->gaids_number_of_ids = num;
5315 num = 0;
5316 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5317 ids->gaids_assoc_id[num++] = asoc->assoc_id;
5318 }
5319
5320 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
5321 kfree(ids);
5322 return -EFAULT;
5323 }
5324
5325 kfree(ids);
5326 return 0;
5327}
5328
5286SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, 5329SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5287 char __user *optval, int __user *optlen) 5330 char __user *optval, int __user *optlen)
5288{ 5331{
@@ -5415,6 +5458,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5415 case SCTP_GET_ASSOC_NUMBER: 5458 case SCTP_GET_ASSOC_NUMBER:
5416 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5459 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
5417 break; 5460 break;
5461 case SCTP_GET_ASSOC_ID_LIST:
5462 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
5463 break;
5418 default: 5464 default:
5419 retval = -ENOPROTOOPT; 5465 retval = -ENOPROTOOPT;
5420 break; 5466 break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d3ae493d234a..d8595dd1a8a7 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -211,15 +211,19 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
211} 211}
212 212
213/* Initialize the pmtu of a transport. */ 213/* Initialize the pmtu of a transport. */
214void sctp_transport_pmtu(struct sctp_transport *transport) 214void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
215{ 215{
216 struct dst_entry *dst; 216 struct flowi fl;
217 217
218 dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL); 218 /* If we don't have a fresh route, look one up */
219 if (!transport->dst || transport->dst->obsolete > 1) {
220 dst_release(transport->dst);
221 transport->af_specific->get_dst(transport, &transport->saddr,
222 &fl, sk);
223 }
219 224
220 if (dst) { 225 if (transport->dst) {
221 transport->pathmtu = dst_mtu(dst); 226 transport->pathmtu = dst_mtu(transport->dst);
222 dst_release(dst);
223 } else 227 } else
224 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 228 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
225} 229}
@@ -270,22 +274,20 @@ void sctp_transport_route(struct sctp_transport *transport,
270{ 274{
271 struct sctp_association *asoc = transport->asoc; 275 struct sctp_association *asoc = transport->asoc;
272 struct sctp_af *af = transport->af_specific; 276 struct sctp_af *af = transport->af_specific;
273 union sctp_addr *daddr = &transport->ipaddr; 277 struct flowi fl;
274 struct dst_entry *dst;
275 278
276 dst = af->get_dst(asoc, daddr, saddr); 279 af->get_dst(transport, saddr, &fl, sctp_opt2sk(opt));
277 280
278 if (saddr) 281 if (saddr)
279 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 282 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
280 else 283 else
281 af->get_saddr(opt, asoc, dst, daddr, &transport->saddr); 284 af->get_saddr(opt, transport, &fl);
282 285
283 transport->dst = dst;
284 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { 286 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
285 return; 287 return;
286 } 288 }
287 if (dst) { 289 if (transport->dst) {
288 transport->pathmtu = dst_mtu(dst); 290 transport->pathmtu = dst_mtu(transport->dst);
289 291
290 /* Initialize sk->sk_rcv_saddr, if the transport is the 292 /* Initialize sk->sk_rcv_saddr, if the transport is the
291 * association's active path for getsockname(). 293 * association's active path for getsockname().
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index dff27d5e22fd..c962c6062aab 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -843,7 +843,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey(
843 ak = (struct sctp_authkey_event *) 843 ak = (struct sctp_authkey_event *)
844 skb_put(skb, sizeof(struct sctp_authkey_event)); 844 skb_put(skb, sizeof(struct sctp_authkey_event));
845 845
846 ak->auth_type = SCTP_AUTHENTICATION_INDICATION; 846 ak->auth_type = SCTP_AUTHENTICATION_EVENT;
847 ak->auth_flags = 0; 847 ak->auth_flags = 0;
848 ak->auth_length = sizeof(struct sctp_authkey_event); 848 ak->auth_length = sizeof(struct sctp_authkey_event);
849 849
@@ -862,6 +862,34 @@ fail:
862 return NULL; 862 return NULL;
863} 863}
864 864
865/*
866 * Socket Extensions for SCTP
867 * 6.3.10. SCTP_SENDER_DRY_EVENT
868 */
869struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
870 const struct sctp_association *asoc, gfp_t gfp)
871{
872 struct sctp_ulpevent *event;
873 struct sctp_sender_dry_event *sdry;
874 struct sk_buff *skb;
875
876 event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
877 MSG_NOTIFICATION, gfp);
878 if (!event)
879 return NULL;
880
881 skb = sctp_event2skb(event);
882 sdry = (struct sctp_sender_dry_event *)
883 skb_put(skb, sizeof(struct sctp_sender_dry_event));
884
885 sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
886 sdry->sender_dry_flags = 0;
887 sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
888 sctp_ulpevent_set_owner(event, asoc);
889 sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);
890
891 return event;
892}
865 893
866/* Return the notification type, assuming this is a notification 894/* Return the notification type, assuming this is a notification
867 * event. 895 * event.
diff --git a/net/socket.c b/net/socket.c
index 310d16b1b3c9..ed50255143d5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -551,11 +551,10 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
551} 551}
552EXPORT_SYMBOL(sock_tx_timestamp); 552EXPORT_SYMBOL(sock_tx_timestamp);
553 553
554static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, 554static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
555 struct msghdr *msg, size_t size) 555 struct msghdr *msg, size_t size)
556{ 556{
557 struct sock_iocb *si = kiocb_to_siocb(iocb); 557 struct sock_iocb *si = kiocb_to_siocb(iocb);
558 int err;
559 558
560 sock_update_classid(sock->sk); 559 sock_update_classid(sock->sk);
561 560
@@ -564,13 +563,17 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
564 si->msg = msg; 563 si->msg = msg;
565 si->size = size; 564 si->size = size;
566 565
567 err = security_socket_sendmsg(sock, msg, size);
568 if (err)
569 return err;
570
571 return sock->ops->sendmsg(iocb, sock, msg, size); 566 return sock->ops->sendmsg(iocb, sock, msg, size);
572} 567}
573 568
569static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
570 struct msghdr *msg, size_t size)
571{
572 int err = security_socket_sendmsg(sock, msg, size);
573
574 return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
575}
576
574int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) 577int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
575{ 578{
576 struct kiocb iocb; 579 struct kiocb iocb;
@@ -586,6 +589,20 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
586} 589}
587EXPORT_SYMBOL(sock_sendmsg); 590EXPORT_SYMBOL(sock_sendmsg);
588 591
592int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
593{
594 struct kiocb iocb;
595 struct sock_iocb siocb;
596 int ret;
597
598 init_sync_kiocb(&iocb, NULL);
599 iocb.private = &siocb;
600 ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
601 if (-EIOCBQUEUED == ret)
602 ret = wait_on_sync_kiocb(&iocb);
603 return ret;
604}
605
589int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 606int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
590 struct kvec *vec, size_t num, size_t size) 607 struct kvec *vec, size_t num, size_t size)
591{ 608{
@@ -1863,57 +1880,47 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
1863#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) 1880#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
1864#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) 1881#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
1865 1882
1866/* 1883static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1867 * BSD sendmsg interface 1884 struct msghdr *msg_sys, unsigned flags, int nosec)
1868 */
1869
1870SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1871{ 1885{
1872 struct compat_msghdr __user *msg_compat = 1886 struct compat_msghdr __user *msg_compat =
1873 (struct compat_msghdr __user *)msg; 1887 (struct compat_msghdr __user *)msg;
1874 struct socket *sock;
1875 struct sockaddr_storage address; 1888 struct sockaddr_storage address;
1876 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; 1889 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1877 unsigned char ctl[sizeof(struct cmsghdr) + 20] 1890 unsigned char ctl[sizeof(struct cmsghdr) + 20]
1878 __attribute__ ((aligned(sizeof(__kernel_size_t)))); 1891 __attribute__ ((aligned(sizeof(__kernel_size_t))));
1879 /* 20 is size of ipv6_pktinfo */ 1892 /* 20 is size of ipv6_pktinfo */
1880 unsigned char *ctl_buf = ctl; 1893 unsigned char *ctl_buf = ctl;
1881 struct msghdr msg_sys;
1882 int err, ctl_len, iov_size, total_len; 1894 int err, ctl_len, iov_size, total_len;
1883 int fput_needed;
1884 1895
1885 err = -EFAULT; 1896 err = -EFAULT;
1886 if (MSG_CMSG_COMPAT & flags) { 1897 if (MSG_CMSG_COMPAT & flags) {
1887 if (get_compat_msghdr(&msg_sys, msg_compat)) 1898 if (get_compat_msghdr(msg_sys, msg_compat))
1888 return -EFAULT; 1899 return -EFAULT;
1889 } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) 1900 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1890 return -EFAULT; 1901 return -EFAULT;
1891 1902
1892 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1893 if (!sock)
1894 goto out;
1895
1896 /* do not move before msg_sys is valid */ 1903 /* do not move before msg_sys is valid */
1897 err = -EMSGSIZE; 1904 err = -EMSGSIZE;
1898 if (msg_sys.msg_iovlen > UIO_MAXIOV) 1905 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1899 goto out_put; 1906 goto out;
1900 1907
1901 /* Check whether to allocate the iovec area */ 1908 /* Check whether to allocate the iovec area */
1902 err = -ENOMEM; 1909 err = -ENOMEM;
1903 iov_size = msg_sys.msg_iovlen * sizeof(struct iovec); 1910 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
1904 if (msg_sys.msg_iovlen > UIO_FASTIOV) { 1911 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1905 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 1912 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
1906 if (!iov) 1913 if (!iov)
1907 goto out_put; 1914 goto out;
1908 } 1915 }
1909 1916
1910 /* This will also move the address data into kernel space */ 1917 /* This will also move the address data into kernel space */
1911 if (MSG_CMSG_COMPAT & flags) { 1918 if (MSG_CMSG_COMPAT & flags) {
1912 err = verify_compat_iovec(&msg_sys, iov, 1919 err = verify_compat_iovec(msg_sys, iov,
1913 (struct sockaddr *)&address, 1920 (struct sockaddr *)&address,
1914 VERIFY_READ); 1921 VERIFY_READ);
1915 } else 1922 } else
1916 err = verify_iovec(&msg_sys, iov, 1923 err = verify_iovec(msg_sys, iov,
1917 (struct sockaddr *)&address, 1924 (struct sockaddr *)&address,
1918 VERIFY_READ); 1925 VERIFY_READ);
1919 if (err < 0) 1926 if (err < 0)
@@ -1922,17 +1929,17 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1922 1929
1923 err = -ENOBUFS; 1930 err = -ENOBUFS;
1924 1931
1925 if (msg_sys.msg_controllen > INT_MAX) 1932 if (msg_sys->msg_controllen > INT_MAX)
1926 goto out_freeiov; 1933 goto out_freeiov;
1927 ctl_len = msg_sys.msg_controllen; 1934 ctl_len = msg_sys->msg_controllen;
1928 if ((MSG_CMSG_COMPAT & flags) && ctl_len) { 1935 if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
1929 err = 1936 err =
1930 cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl, 1937 cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
1931 sizeof(ctl)); 1938 sizeof(ctl));
1932 if (err) 1939 if (err)
1933 goto out_freeiov; 1940 goto out_freeiov;
1934 ctl_buf = msg_sys.msg_control; 1941 ctl_buf = msg_sys->msg_control;
1935 ctl_len = msg_sys.msg_controllen; 1942 ctl_len = msg_sys->msg_controllen;
1936 } else if (ctl_len) { 1943 } else if (ctl_len) {
1937 if (ctl_len > sizeof(ctl)) { 1944 if (ctl_len > sizeof(ctl)) {
1938 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); 1945 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
@@ -1941,21 +1948,22 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1941 } 1948 }
1942 err = -EFAULT; 1949 err = -EFAULT;
1943 /* 1950 /*
1944 * Careful! Before this, msg_sys.msg_control contains a user pointer. 1951 * Careful! Before this, msg_sys->msg_control contains a user pointer.
1945 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted 1952 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
1946 * checking falls down on this. 1953 * checking falls down on this.
1947 */ 1954 */
1948 if (copy_from_user(ctl_buf, 1955 if (copy_from_user(ctl_buf,
1949 (void __user __force *)msg_sys.msg_control, 1956 (void __user __force *)msg_sys->msg_control,
1950 ctl_len)) 1957 ctl_len))
1951 goto out_freectl; 1958 goto out_freectl;
1952 msg_sys.msg_control = ctl_buf; 1959 msg_sys->msg_control = ctl_buf;
1953 } 1960 }
1954 msg_sys.msg_flags = flags; 1961 msg_sys->msg_flags = flags;
1955 1962
1956 if (sock->file->f_flags & O_NONBLOCK) 1963 if (sock->file->f_flags & O_NONBLOCK)
1957 msg_sys.msg_flags |= MSG_DONTWAIT; 1964 msg_sys->msg_flags |= MSG_DONTWAIT;
1958 err = sock_sendmsg(sock, &msg_sys, total_len); 1965 err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
1966 total_len);
1959 1967
1960out_freectl: 1968out_freectl:
1961 if (ctl_buf != ctl) 1969 if (ctl_buf != ctl)
@@ -1963,12 +1971,114 @@ out_freectl:
1963out_freeiov: 1971out_freeiov:
1964 if (iov != iovstack) 1972 if (iov != iovstack)
1965 sock_kfree_s(sock->sk, iov, iov_size); 1973 sock_kfree_s(sock->sk, iov, iov_size);
1966out_put: 1974out:
1975 return err;
1976}
1977
1978/*
1979 * BSD sendmsg interface
1980 */
1981
1982SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1983{
1984 int fput_needed, err;
1985 struct msghdr msg_sys;
1986 struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
1987
1988 if (!sock)
1989 goto out;
1990
1991 err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0);
1992
1967 fput_light(sock->file, fput_needed); 1993 fput_light(sock->file, fput_needed);
1968out: 1994out:
1969 return err; 1995 return err;
1970} 1996}
1971 1997
1998/*
1999 * Linux sendmmsg interface
2000 */
2001
2002int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2003 unsigned int flags)
2004{
2005 int fput_needed, err, datagrams;
2006 struct socket *sock;
2007 struct mmsghdr __user *entry;
2008 struct compat_mmsghdr __user *compat_entry;
2009 struct msghdr msg_sys;
2010
2011 datagrams = 0;
2012
2013 sock = sockfd_lookup_light(fd, &err, &fput_needed);
2014 if (!sock)
2015 return err;
2016
2017 err = sock_error(sock->sk);
2018 if (err)
2019 goto out_put;
2020
2021 entry = mmsg;
2022 compat_entry = (struct compat_mmsghdr __user *)mmsg;
2023
2024 while (datagrams < vlen) {
2025 /*
2026 * No need to ask LSM for more than the first datagram.
2027 */
2028 if (MSG_CMSG_COMPAT & flags) {
2029 err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
2030 &msg_sys, flags, datagrams);
2031 if (err < 0)
2032 break;
2033 err = __put_user(err, &compat_entry->msg_len);
2034 ++compat_entry;
2035 } else {
2036 err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
2037 &msg_sys, flags, datagrams);
2038 if (err < 0)
2039 break;
2040 err = put_user(err, &entry->msg_len);
2041 ++entry;
2042 }
2043
2044 if (err)
2045 break;
2046 ++datagrams;
2047 }
2048
2049out_put:
2050 fput_light(sock->file, fput_needed);
2051
2052 if (err == 0)
2053 return datagrams;
2054
2055 if (datagrams != 0) {
2056 /*
2057 * We may send less entries than requested (vlen) if the
2058 * sock is non blocking...
2059 */
2060 if (err != -EAGAIN) {
2061 /*
2062 * ... or if sendmsg returns an error after we
2063 * send some datagrams, where we record the
2064 * error to return on the next call or if the
2065 * app asks about it using getsockopt(SO_ERROR).
2066 */
2067 sock->sk->sk_err = -err;
2068 }
2069
2070 return datagrams;
2071 }
2072
2073 return err;
2074}
2075
2076SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
2077 unsigned int, vlen, unsigned int, flags)
2078{
2079 return __sys_sendmmsg(fd, mmsg, vlen, flags);
2080}
2081
1972static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2082static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
1973 struct msghdr *msg_sys, unsigned flags, int nosec) 2083 struct msghdr *msg_sys, unsigned flags, int nosec)
1974{ 2084{
@@ -2214,11 +2324,11 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
2214#ifdef __ARCH_WANT_SYS_SOCKETCALL 2324#ifdef __ARCH_WANT_SYS_SOCKETCALL
2215/* Argument list sizes for sys_socketcall */ 2325/* Argument list sizes for sys_socketcall */
2216#define AL(x) ((x) * sizeof(unsigned long)) 2326#define AL(x) ((x) * sizeof(unsigned long))
2217static const unsigned char nargs[20] = { 2327static const unsigned char nargs[21] = {
2218 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 2328 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
2219 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 2329 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
2220 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 2330 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
2221 AL(4), AL(5) 2331 AL(4), AL(5), AL(4)
2222}; 2332};
2223 2333
2224#undef AL 2334#undef AL
@@ -2238,7 +2348,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2238 int err; 2348 int err;
2239 unsigned int len; 2349 unsigned int len;
2240 2350
2241 if (call < 1 || call > SYS_RECVMMSG) 2351 if (call < 1 || call > SYS_SENDMMSG)
2242 return -EINVAL; 2352 return -EINVAL;
2243 2353
2244 len = nargs[call]; 2354 len = nargs[call];
@@ -2313,6 +2423,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2313 case SYS_SENDMSG: 2423 case SYS_SENDMSG:
2314 err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); 2424 err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
2315 break; 2425 break;
2426 case SYS_SENDMMSG:
2427 err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
2428 break;
2316 case SYS_RECVMSG: 2429 case SYS_RECVMSG:
2317 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); 2430 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
2318 break; 2431 break;
@@ -2643,13 +2756,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2643 return -EFAULT; 2756 return -EFAULT;
2644 2757
2645 if (convert_in) { 2758 if (convert_in) {
2646 /* We expect there to be holes between fs.m_u and 2759 /* We expect there to be holes between fs.m_ext and
2647 * fs.ring_cookie and at the end of fs, but nowhere else. 2760 * fs.ring_cookie and at the end of fs, but nowhere else.
2648 */ 2761 */
2649 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + 2762 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
2650 sizeof(compat_rxnfc->fs.m_u) != 2763 sizeof(compat_rxnfc->fs.m_ext) !=
2651 offsetof(struct ethtool_rxnfc, fs.m_u) + 2764 offsetof(struct ethtool_rxnfc, fs.m_ext) +
2652 sizeof(rxnfc->fs.m_u)); 2765 sizeof(rxnfc->fs.m_ext));
2653 BUILD_BUG_ON( 2766 BUILD_BUG_ON(
2654 offsetof(struct compat_ethtool_rxnfc, fs.location) - 2767 offsetof(struct compat_ethtool_rxnfc, fs.location) -
2655 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 2768 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
@@ -2657,7 +2770,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2657 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2770 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2658 2771
2659 if (copy_in_user(rxnfc, compat_rxnfc, 2772 if (copy_in_user(rxnfc, compat_rxnfc,
2660 (void *)(&rxnfc->fs.m_u + 1) - 2773 (void *)(&rxnfc->fs.m_ext + 1) -
2661 (void *)rxnfc) || 2774 (void *)rxnfc) ||
2662 copy_in_user(&rxnfc->fs.ring_cookie, 2775 copy_in_user(&rxnfc->fs.ring_cookie,
2663 &compat_rxnfc->fs.ring_cookie, 2776 &compat_rxnfc->fs.ring_cookie,
@@ -2674,7 +2787,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2674 2787
2675 if (convert_out) { 2788 if (convert_out) {
2676 if (copy_in_user(compat_rxnfc, rxnfc, 2789 if (copy_in_user(compat_rxnfc, rxnfc,
2677 (const void *)(&rxnfc->fs.m_u + 1) - 2790 (const void *)(&rxnfc->fs.m_ext + 1) -
2678 (const void *)rxnfc) || 2791 (const void *)rxnfc) ||
2679 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2792 copy_in_user(&compat_rxnfc->fs.ring_cookie,
2680 &rxnfc->fs.ring_cookie, 2793 &rxnfc->fs.ring_cookie,
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fe01de29bfe8..bbf1fa11107a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -46,6 +46,11 @@ static struct dentry *ieee80211_debugfs_dir;
46/* for the cleanup, scan and event works */ 46/* for the cleanup, scan and event works */
47struct workqueue_struct *cfg80211_wq; 47struct workqueue_struct *cfg80211_wq;
48 48
49static bool cfg80211_disable_40mhz_24ghz;
50module_param(cfg80211_disable_40mhz_24ghz, bool, 0644);
51MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
52 "Disable 40MHz support in the 2.4GHz band");
53
49/* requires cfg80211_mutex to be held! */ 54/* requires cfg80211_mutex to be held! */
50struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) 55struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
51{ 56{
@@ -451,6 +456,18 @@ int wiphy_register(struct wiphy *wiphy)
451 return -EINVAL; 456 return -EINVAL;
452 457
453 /* 458 /*
459 * Since cfg80211_disable_40mhz_24ghz is global, we can
460 * modify the sband's ht data even if the driver uses a
461 * global structure for that.
462 */
463 if (cfg80211_disable_40mhz_24ghz &&
464 band == IEEE80211_BAND_2GHZ &&
465 sband->ht_cap.ht_supported) {
466 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
467 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
468 }
469
470 /*
454 * Since we use a u32 for rate bitmaps in 471 * Since we use a u32 for rate bitmaps in
455 * ieee80211_get_response_rate, we cannot 472 * ieee80211_get_response_rate, we cannot
456 * have more than 32 legacy rates. 473 * have more than 32 legacy rates.
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 73e39c171ffb..5c116083eeca 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,5 +1,6 @@
1#include <linux/ieee80211.h> 1#include <linux/ieee80211.h>
2#include <net/cfg80211.h> 2#include <net/cfg80211.h>
3#include "nl80211.h"
3#include "core.h" 4#include "core.h"
4 5
5/* Default values, timeouts in ms */ 6/* Default values, timeouts in ms */
@@ -53,8 +54,9 @@ const struct mesh_config default_mesh_config = {
53const struct mesh_setup default_mesh_setup = { 54const struct mesh_setup default_mesh_setup = {
54 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 55 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
55 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 56 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
56 .vendor_ie = NULL, 57 .ie = NULL,
57 .vendor_ie_len = 0, 58 .ie_len = 0,
59 .is_secure = false,
58}; 60};
59 61
60int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 62int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
@@ -72,6 +74,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
72 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) 74 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
73 return -EOPNOTSUPP; 75 return -EOPNOTSUPP;
74 76
77 if (!(rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
78 setup->is_secure)
79 return -EOPNOTSUPP;
80
75 if (wdev->mesh_id_len) 81 if (wdev->mesh_id_len)
76 return -EALREADY; 82 return -EALREADY;
77 83
@@ -105,6 +111,19 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
105 return err; 111 return err;
106} 112}
107 113
114void cfg80211_notify_new_peer_candidate(struct net_device *dev,
115 const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
116{
117 struct wireless_dev *wdev = dev->ieee80211_ptr;
118
119 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
120 return;
121
122 nl80211_send_new_peer_candidate(wiphy_to_dev(wdev->wiphy), dev,
123 macaddr, ie, ie_len, gfp);
124}
125EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
126
108static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 127static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
109 struct net_device *dev) 128 struct net_device *dev)
110{ 129{
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index aa5df8865ff7..16881fea4ce6 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -770,6 +770,15 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
770} 770}
771EXPORT_SYMBOL(cfg80211_new_sta); 771EXPORT_SYMBOL(cfg80211_new_sta);
772 772
773void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
774{
775 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
776 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
777
778 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
779}
780EXPORT_SYMBOL(cfg80211_del_sta);
781
773struct cfg80211_mgmt_registration { 782struct cfg80211_mgmt_registration {
774 struct list_head list; 783 struct list_head list;
775 784
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4ebce4284e9d..0efa7fd01150 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -124,6 +124,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
124 [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 }, 124 [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 },
125 125
126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, 126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
127 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
127 128
128 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 129 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
129 .len = NL80211_HT_CAPABILITY_LEN }, 130 .len = NL80211_HT_CAPABILITY_LEN },
@@ -594,6 +595,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
594 595
595 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) 596 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
596 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); 597 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
598 if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
599 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
597 600
598 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, 601 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
599 sizeof(u32) * dev->wiphy.n_cipher_suites, 602 sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -1922,6 +1925,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
1922 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, 1925 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
1923 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, 1926 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
1924 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, 1927 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
1928 [NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
1925}; 1929};
1926 1930
1927static int parse_station_flags(struct genl_info *info, 1931static int parse_station_flags(struct genl_info *info,
@@ -2002,7 +2006,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2002 const u8 *mac_addr, struct station_info *sinfo) 2006 const u8 *mac_addr, struct station_info *sinfo)
2003{ 2007{
2004 void *hdr; 2008 void *hdr;
2005 struct nlattr *sinfoattr; 2009 struct nlattr *sinfoattr, *bss_param;
2006 2010
2007 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 2011 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
2008 if (!hdr) 2012 if (!hdr)
@@ -2016,6 +2020,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2016 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); 2020 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
2017 if (!sinfoattr) 2021 if (!sinfoattr)
2018 goto nla_put_failure; 2022 goto nla_put_failure;
2023 if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
2024 NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
2025 sinfo->connected_time);
2019 if (sinfo->filled & STATION_INFO_INACTIVE_TIME) 2026 if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
2020 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, 2027 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2021 sinfo->inactive_time); 2028 sinfo->inactive_time);
@@ -2062,6 +2069,25 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2062 if (sinfo->filled & STATION_INFO_TX_FAILED) 2069 if (sinfo->filled & STATION_INFO_TX_FAILED)
2063 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, 2070 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
2064 sinfo->tx_failed); 2071 sinfo->tx_failed);
2072 if (sinfo->filled & STATION_INFO_BSS_PARAM) {
2073 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
2074 if (!bss_param)
2075 goto nla_put_failure;
2076
2077 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
2078 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
2079 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
2080 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
2081 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
2082 NLA_PUT_FLAG(msg,
2083 NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
2084 NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
2085 sinfo->bss_param.dtim_period);
2086 NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
2087 sinfo->bss_param.beacon_interval);
2088
2089 nla_nest_end(msg, bss_param);
2090 }
2065 nla_nest_end(msg, sinfoattr); 2091 nla_nest_end(msg, sinfoattr);
2066 2092
2067 return genlmsg_end(msg, hdr); 2093 return genlmsg_end(msg, hdr);
@@ -2262,7 +2288,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2262 err = -EINVAL; 2288 err = -EINVAL;
2263 if (params.supported_rates) 2289 if (params.supported_rates)
2264 err = -EINVAL; 2290 err = -EINVAL;
2265 if (params.sta_flags_mask) 2291 if (params.sta_flags_mask &
2292 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
2293 BIT(NL80211_STA_FLAG_AUTHORIZED)))
2266 err = -EINVAL; 2294 err = -EINVAL;
2267 break; 2295 break;
2268 default: 2296 default:
@@ -2324,11 +2352,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2324 params.ht_capa = 2352 params.ht_capa =
2325 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 2353 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
2326 2354
2355 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
2356 params.plink_action =
2357 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
2358
2327 if (parse_station_flags(info, &params)) 2359 if (parse_station_flags(info, &params))
2328 return -EINVAL; 2360 return -EINVAL;
2329 2361
2330 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2362 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2331 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 2363 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
2364 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
2332 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2365 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2333 return -EINVAL; 2366 return -EINVAL;
2334 2367
@@ -2804,7 +2837,8 @@ static const struct nla_policy
2804 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { 2837 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
2805 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 2838 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
2806 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 2839 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
2807 [NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE] = { .type = NLA_BINARY, 2840 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
2841 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
2808 .len = IEEE80211_MAX_DATA_LEN }, 2842 .len = IEEE80211_MAX_DATA_LEN },
2809}; 2843};
2810 2844
@@ -2906,14 +2940,16 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
2906 IEEE80211_PATH_METRIC_VENDOR : 2940 IEEE80211_PATH_METRIC_VENDOR :
2907 IEEE80211_PATH_METRIC_AIRTIME; 2941 IEEE80211_PATH_METRIC_AIRTIME;
2908 2942
2909 if (tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]) { 2943
2944 if (tb[NL80211_MESH_SETUP_IE]) {
2910 struct nlattr *ieattr = 2945 struct nlattr *ieattr =
2911 tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]; 2946 tb[NL80211_MESH_SETUP_IE];
2912 if (!is_valid_ie_attr(ieattr)) 2947 if (!is_valid_ie_attr(ieattr))
2913 return -EINVAL; 2948 return -EINVAL;
2914 setup->vendor_ie = nla_data(ieattr); 2949 setup->ie = nla_data(ieattr);
2915 setup->vendor_ie_len = nla_len(ieattr); 2950 setup->ie_len = nla_len(ieattr);
2916 } 2951 }
2952 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
2917 2953
2918 return 0; 2954 return 0;
2919} 2955}
@@ -5785,6 +5821,44 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
5785 nlmsg_free(msg); 5821 nlmsg_free(msg);
5786} 5822}
5787 5823
5824void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
5825 struct net_device *netdev,
5826 const u8 *macaddr, const u8* ie, u8 ie_len,
5827 gfp_t gfp)
5828{
5829 struct sk_buff *msg;
5830 void *hdr;
5831
5832 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5833 if (!msg)
5834 return;
5835
5836 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE);
5837 if (!hdr) {
5838 nlmsg_free(msg);
5839 return;
5840 }
5841
5842 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5843 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5844 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
5845 if (ie_len && ie)
5846 NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
5847
5848 if (genlmsg_end(msg, hdr) < 0) {
5849 nlmsg_free(msg);
5850 return;
5851 }
5852
5853 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5854 nl80211_mlme_mcgrp.id, gfp);
5855 return;
5856
5857 nla_put_failure:
5858 genlmsg_cancel(msg, hdr);
5859 nlmsg_free(msg);
5860}
5861
5788void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 5862void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
5789 struct net_device *netdev, const u8 *addr, 5863 struct net_device *netdev, const u8 *addr,
5790 enum nl80211_key_type key_type, int key_id, 5864 enum nl80211_key_type key_type, int key_id,
@@ -5966,6 +6040,40 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5966 nl80211_mlme_mcgrp.id, gfp); 6040 nl80211_mlme_mcgrp.id, gfp);
5967} 6041}
5968 6042
6043void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
6044 struct net_device *dev, const u8 *mac_addr,
6045 gfp_t gfp)
6046{
6047 struct sk_buff *msg;
6048 void *hdr;
6049
6050 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
6051 if (!msg)
6052 return;
6053
6054 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION);
6055 if (!hdr) {
6056 nlmsg_free(msg);
6057 return;
6058 }
6059
6060 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
6061 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
6062
6063 if (genlmsg_end(msg, hdr) < 0) {
6064 nlmsg_free(msg);
6065 return;
6066 }
6067
6068 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
6069 nl80211_mlme_mcgrp.id, gfp);
6070 return;
6071
6072 nla_put_failure:
6073 genlmsg_cancel(msg, hdr);
6074 nlmsg_free(msg);
6075}
6076
5969int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 6077int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
5970 struct net_device *netdev, u32 nlpid, 6078 struct net_device *netdev, u32 nlpid,
5971 int freq, const u8 *buf, size_t len, gfp_t gfp) 6079 int freq, const u8 *buf, size_t len, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index e3f7fa886966..f2af6955a665 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -50,6 +50,10 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
50 struct net_device *netdev, u16 reason, 50 struct net_device *netdev, u16 reason,
51 const u8 *ie, size_t ie_len, bool from_ap); 51 const u8 *ie, size_t ie_len, bool from_ap);
52 52
53void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
54 struct net_device *netdev,
55 const u8 *macaddr, const u8* ie, u8 ie_len,
56 gfp_t gfp);
53void 57void
54nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 58nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
55 struct net_device *netdev, const u8 *addr, 59 struct net_device *netdev, const u8 *addr,
@@ -79,6 +83,9 @@ void nl80211_send_remain_on_channel_cancel(
79void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 83void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
80 struct net_device *dev, const u8 *mac_addr, 84 struct net_device *dev, const u8 *mac_addr,
81 struct station_info *sinfo, gfp_t gfp); 85 struct station_info *sinfo, gfp_t gfp);
86void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
87 struct net_device *dev, const u8 *mac_addr,
88 gfp_t gfp);
82 89
83int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 90int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
84 struct net_device *netdev, u32 nlpid, int freq, 91 struct net_device *netdev, u32 nlpid, int freq,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ab801a1097b2..1613080a96b9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -106,6 +106,9 @@ struct reg_beacon {
106static void reg_todo(struct work_struct *work); 106static void reg_todo(struct work_struct *work);
107static DECLARE_WORK(reg_work, reg_todo); 107static DECLARE_WORK(reg_work, reg_todo);
108 108
109static void reg_timeout_work(struct work_struct *work);
110static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
111
109/* We keep a static world regulatory domain in case of the absence of CRDA */ 112/* We keep a static world regulatory domain in case of the absence of CRDA */
110static const struct ieee80211_regdomain world_regdom = { 113static const struct ieee80211_regdomain world_regdom = {
111 .n_reg_rules = 5, 114 .n_reg_rules = 5,
@@ -1330,6 +1333,9 @@ static void reg_set_request_processed(void)
1330 need_more_processing = true; 1333 need_more_processing = true;
1331 spin_unlock(&reg_requests_lock); 1334 spin_unlock(&reg_requests_lock);
1332 1335
1336 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
1337 cancel_delayed_work_sync(&reg_timeout);
1338
1333 if (need_more_processing) 1339 if (need_more_processing)
1334 schedule_work(&reg_work); 1340 schedule_work(&reg_work);
1335} 1341}
@@ -1440,8 +1446,18 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1440 r = __regulatory_hint(wiphy, reg_request); 1446 r = __regulatory_hint(wiphy, reg_request);
1441 /* This is required so that the orig_* parameters are saved */ 1447 /* This is required so that the orig_* parameters are saved */
1442 if (r == -EALREADY && wiphy && 1448 if (r == -EALREADY && wiphy &&
1443 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) 1449 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
1444 wiphy_update_regulatory(wiphy, initiator); 1450 wiphy_update_regulatory(wiphy, initiator);
1451 return;
1452 }
1453
1454 /*
1455 * We only time out user hints, given that they should be the only
1456 * source of bogus requests.
1457 */
1458 if (r != -EALREADY &&
1459 reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
1460 schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
1445} 1461}
1446 1462
1447/* 1463/*
@@ -1744,6 +1760,8 @@ static void restore_regulatory_settings(bool reset_user)
1744{ 1760{
1745 char alpha2[2]; 1761 char alpha2[2];
1746 struct reg_beacon *reg_beacon, *btmp; 1762 struct reg_beacon *reg_beacon, *btmp;
1763 struct regulatory_request *reg_request, *tmp;
1764 LIST_HEAD(tmp_reg_req_list);
1747 1765
1748 mutex_lock(&cfg80211_mutex); 1766 mutex_lock(&cfg80211_mutex);
1749 mutex_lock(&reg_mutex); 1767 mutex_lock(&reg_mutex);
@@ -1751,6 +1769,25 @@ static void restore_regulatory_settings(bool reset_user)
1751 reset_regdomains(); 1769 reset_regdomains();
1752 restore_alpha2(alpha2, reset_user); 1770 restore_alpha2(alpha2, reset_user);
1753 1771
1772 /*
1773 * If there's any pending requests we simply
1774 * stash them to a temporary pending queue and
1775 * add then after we've restored regulatory
1776 * settings.
1777 */
1778 spin_lock(&reg_requests_lock);
1779 if (!list_empty(&reg_requests_list)) {
1780 list_for_each_entry_safe(reg_request, tmp,
1781 &reg_requests_list, list) {
1782 if (reg_request->initiator !=
1783 NL80211_REGDOM_SET_BY_USER)
1784 continue;
1785 list_del(&reg_request->list);
1786 list_add_tail(&reg_request->list, &tmp_reg_req_list);
1787 }
1788 }
1789 spin_unlock(&reg_requests_lock);
1790
1754 /* Clear beacon hints */ 1791 /* Clear beacon hints */
1755 spin_lock_bh(&reg_pending_beacons_lock); 1792 spin_lock_bh(&reg_pending_beacons_lock);
1756 if (!list_empty(&reg_pending_beacons)) { 1793 if (!list_empty(&reg_pending_beacons)) {
@@ -1785,8 +1822,31 @@ static void restore_regulatory_settings(bool reset_user)
1785 */ 1822 */
1786 if (is_an_alpha2(alpha2)) 1823 if (is_an_alpha2(alpha2))
1787 regulatory_hint_user(user_alpha2); 1824 regulatory_hint_user(user_alpha2);
1788}
1789 1825
1826 if (list_empty(&tmp_reg_req_list))
1827 return;
1828
1829 mutex_lock(&cfg80211_mutex);
1830 mutex_lock(&reg_mutex);
1831
1832 spin_lock(&reg_requests_lock);
1833 list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
1834 REG_DBG_PRINT("Adding request for country %c%c back "
1835 "into the queue\n",
1836 reg_request->alpha2[0],
1837 reg_request->alpha2[1]);
1838 list_del(&reg_request->list);
1839 list_add_tail(&reg_request->list, &reg_requests_list);
1840 }
1841 spin_unlock(&reg_requests_lock);
1842
1843 mutex_unlock(&reg_mutex);
1844 mutex_unlock(&cfg80211_mutex);
1845
1846 REG_DBG_PRINT("Kicking the queue\n");
1847
1848 schedule_work(&reg_work);
1849}
1790 1850
1791void regulatory_hint_disconnect(void) 1851void regulatory_hint_disconnect(void)
1792{ 1852{
@@ -2125,6 +2185,13 @@ out:
2125 mutex_unlock(&reg_mutex); 2185 mutex_unlock(&reg_mutex);
2126} 2186}
2127 2187
2188static void reg_timeout_work(struct work_struct *work)
2189{
2190 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
2191 "restoring regulatory settings");
2192 restore_regulatory_settings(true);
2193}
2194
2128int __init regulatory_init(void) 2195int __init regulatory_init(void)
2129{ 2196{
2130 int err = 0; 2197 int err = 0;
@@ -2178,6 +2245,7 @@ void /* __init_or_exit */ regulatory_exit(void)
2178 struct reg_beacon *reg_beacon, *btmp; 2245 struct reg_beacon *reg_beacon, *btmp;
2179 2246
2180 cancel_work_sync(&reg_work); 2247 cancel_work_sync(&reg_work);
2248 cancel_delayed_work_sync(&reg_timeout);
2181 2249
2182 mutex_lock(&cfg80211_mutex); 2250 mutex_lock(&cfg80211_mutex);
2183 mutex_lock(&reg_mutex); 2251 mutex_lock(&reg_mutex);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 15792d8b6272..00bcb88386c2 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1348,7 +1348,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1348 default: 1348 default:
1349 BUG(); 1349 BUG();
1350 } 1350 }
1351 xdst = dst_alloc(dst_ops, 0); 1351 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
1352 memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
1352 xfrm_policy_put_afinfo(afinfo); 1353 xfrm_policy_put_afinfo(afinfo);
1353 1354
1354 if (likely(xdst)) 1355 if (likely(xdst))
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dd78536d40de..d70f85eb7864 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1036,15 +1036,15 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
1036 1036
1037 case AF_INET6: 1037 case AF_INET6:
1038 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6, 1038 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1039 (struct in6_addr *)daddr); 1039 (const struct in6_addr *)daddr);
1040 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6, 1040 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1041 (struct in6_addr *)saddr); 1041 (const struct in6_addr *)saddr);
1042 x->sel.prefixlen_d = 128; 1042 x->sel.prefixlen_d = 128;
1043 x->sel.prefixlen_s = 128; 1043 x->sel.prefixlen_s = 128;
1044 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6, 1044 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1045 (struct in6_addr *)saddr); 1045 (const struct in6_addr *)saddr);
1046 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6, 1046 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1047 (struct in6_addr *)daddr); 1047 (const struct in6_addr *)daddr);
1048 break; 1048 break;
1049 } 1049 }
1050 1050
@@ -2092,8 +2092,8 @@ static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2092static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, 2092static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2093 struct audit_buffer *audit_buf) 2093 struct audit_buffer *audit_buf)
2094{ 2094{
2095 struct iphdr *iph4; 2095 const struct iphdr *iph4;
2096 struct ipv6hdr *iph6; 2096 const struct ipv6hdr *iph6;
2097 2097
2098 switch (family) { 2098 switch (family) {
2099 case AF_INET: 2099 case AF_INET: